instance_id
stringlengths
10
57
base_commit
stringlengths
40
40
created_at
stringdate
2014-04-30 14:58:36
2025-04-30 20:14:11
environment_setup_commit
stringlengths
40
40
hints_text
stringlengths
0
273k
patch
stringlengths
251
7.06M
problem_statement
stringlengths
11
52.5k
repo
stringlengths
7
53
test_patch
stringlengths
231
997k
meta
dict
version
stringclasses
851 values
install_config
dict
requirements
stringlengths
93
34.2k
environment
stringlengths
760
20.5k
FAIL_TO_PASS
listlengths
1
9.39k
FAIL_TO_FAIL
listlengths
0
2.69k
PASS_TO_PASS
listlengths
0
7.87k
PASS_TO_FAIL
listlengths
0
192
license_name
stringclasses
55 values
__index_level_0__
int64
0
21.4k
before_filepaths
listlengths
1
105
after_filepaths
listlengths
1
105
release-depot__koji_wrapper-36
1746f5425bc0787e6ff3372c0435c0c082df9a33
2018-07-09 18:37:05
b0d0c3a02d3c14d325447f68464bf842ea0dff5d
diff --git a/koji_wrapper/tag.py b/koji_wrapper/tag.py index 9a1792b..03e7c6c 100644 --- a/koji_wrapper/tag.py +++ b/koji_wrapper/tag.py @@ -58,12 +58,18 @@ class KojiTag(KojiWrapper): def tagged_list(self, tagged_list): self.__tagged_list = tagged_list - def builds(self): + def builds(self, **kwargs): """ + This method wraps the koji client method listTagged: + + https://pagure.io/koji/blob/master/f/hub/kojihub.py + + :param **kwargs: Any valid named parameter accepted by the koji + client method listTagged: :returns: list of matching tagged build objects from koji """ if self.tagged_list is None: - self._filter_tagged(self.session.listTagged(self.tag)) + self._filter_tagged(self.session.listTagged(self.tag, **kwargs)) return self.tagged_list def _filter_tagged(self, tagged_builds):
RFE: support for KojiTag Object to use koji tag inheritance or not when listing builds This maps to call to koji.listBuilds(tag="<tag>", inherit=True) This allows tooling to maybe use KojiTag(tag="<tag>", inherit=True) vs KojiTag(tag="<tag>", [inherit=False]) this corresponds to koji list-tagged --inherit <tag> or koji list-tagged <tag>
release-depot/koji_wrapper
diff --git a/tests/unit/test_koji_tag.py b/tests/unit/test_koji_tag.py index 74e97cf..f791911 100644 --- a/tests/unit/test_koji_tag.py +++ b/tests/unit/test_koji_tag.py @@ -73,6 +73,20 @@ def test_gets_builds(sample_tagged_builds): assert kt.session.listTagged.called +def test_passes_builds_extra_args(sample_tagged_builds): + """ + GIVEN we have a KojiTag object + WHEN we call the builds() method for the first time with a parameter + THEN the tagged builds should be returned + AND the listTagged method of the session object should be called + with the expected parameter. + """ + kt = build_tag('foo') + kt.session.listTagged = MagicMock(return_value=sample_tagged_builds) + assert kt.builds(inherit=True) == sample_tagged_builds + assert kt.session.listTagged.called_with(inherit=True) + + def test_caches_builds(sample_tagged_builds): """ GIVEN we have a KojiTag object
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "numpy>=1.16.0", "pandas>=1.0.0" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 decorator==5.2.1 defusedxml==0.7.1 exceptiongroup==1.2.2 gssapi==1.9.0 idna==3.10 iniconfig==2.1.0 koji==1.35.2 -e git+https://github.com/release-depot/koji_wrapper.git@1746f5425bc0787e6ff3372c0435c0c082df9a33#egg=koji_wrapper numpy==2.0.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.32.3 requests-gssapi==1.3.0 six==1.17.0 tomli==2.2.1 tzdata==2025.2 urllib3==2.3.0
name: koji_wrapper channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - decorator==5.2.1 - defusedxml==0.7.1 - exceptiongroup==1.2.2 - gssapi==1.9.0 - idna==3.10 - iniconfig==2.1.0 - koji==1.35.2 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.32.3 - requests-gssapi==1.3.0 - six==1.17.0 - tomli==2.2.1 - tzdata==2025.2 - urllib3==2.3.0 prefix: /opt/conda/envs/koji_wrapper
[ "tests/unit/test_koji_tag.py::test_passes_builds_extra_args" ]
[]
[ "tests/unit/test_koji_tag.py::test_validates_required_fields", "tests/unit/test_koji_tag.py::test_sets_tag", "tests/unit/test_koji_tag.py::test_sets_blacklist_with_str", "tests/unit/test_koji_tag.py::test_sets_blacklist_with_list", "tests/unit/test_koji_tag.py::test_gets_builds", "tests/unit/test_koji_tag.py::test_caches_builds", "tests/unit/test_koji_tag.py::test_filters_builds_by_blacklist", "tests/unit/test_koji_tag.py::test_filters_builds_by_nvr", "tests/unit/test_koji_tag.py::test_filters_builds_by_both" ]
[]
MIT License
2,754
[ "koji_wrapper/tag.py" ]
[ "koji_wrapper/tag.py" ]
ELIFE-ASU__Neet-114
5a45a92c1b3a564290487ecc1483b4866e26eaf5
2018-07-09 19:03:36
5a45a92c1b3a564290487ecc1483b4866e26eaf5
diff --git a/neet/synchronous.py b/neet/synchronous.py index 1c6ed88..8f59454 100644 --- a/neet/synchronous.py +++ b/neet/synchronous.py @@ -35,7 +35,7 @@ def trajectory(net, state, timesteps=1, encode=False): :param state: the network state :param timesteps: the number of steps in the trajectory :param encode: encode the states as integers - :yields: the next state in the trajectory + :returns: the trajectory as a list :raises TypeError: if net is not a network :raises ValueError: if ``timesteps < 1`` """ @@ -44,6 +44,7 @@ def trajectory(net, state, timesteps=1, encode=False): if timesteps < 1: raise ValueError("number of steps must be positive, non-zero") + traj = [] state = copy.copy(state) if encode: if is_fixed_sized(net): @@ -51,23 +52,24 @@ def trajectory(net, state, timesteps=1, encode=False): else: state_space = net.state_space(len(state)) - yield state_space._unsafe_encode(state) + traj.append(state_space._unsafe_encode(state)) net.update(state) - yield state_space._unsafe_encode(state) + traj.append(state_space._unsafe_encode(state)) for _ in range(1,timesteps): net._unsafe_update(state) - yield state_space._unsafe_encode(state) + traj.append(state_space._unsafe_encode(state)) else: - yield copy.copy(state) + traj.append(copy.copy(state)) net.update(state) - yield copy.copy(state) + traj.append(copy.copy(state)) for _ in range(1, timesteps): net._unsafe_update(state) - yield copy.copy(state) + traj.append(copy.copy(state)) + return traj def transitions(net, size=None, encode=False): """ @@ -94,7 +96,7 @@ def transitions(net, size=None, encode=False): :param net: the network :param size: the size of the network (``None`` if fixed sized) :param encode: encode the states as integers - :yields: the one-state transitions + :returns: the one-state transitions as an array :raises TypeError: if ``net`` is not a network :raises ValueError: if ``net`` is fixed sized and ``size`` is not ``None`` :raises ValueError: if ``net`` is not fixed sized and ``size`` is 
``None`` @@ -111,12 +113,15 @@ def transitions(net, size=None, encode=False): raise ValueError("size must not be None for variable sized networks") state_space = net.state_space(size) + trans = [] for state in state_space: net._unsafe_update(state) if encode: - yield state_space._unsafe_encode(state) + trans.append(state_space._unsafe_encode(state)) else: - yield state + trans.append(state) + + return trans def transition_graph(net, size=None): """ @@ -172,15 +177,14 @@ def attractors(net, size=None): :param net: the network or the transition graph :param size: the size of the network (``None`` if fixed sized) - :returns: a generator of attractors + :returns: a list of attractor cycles :raises TypeError: if ``net`` is not a network or a ``networkx.DiGraph`` :raises ValueError: if ``net`` is fixed sized and ``size`` is not ``None`` :raises ValueError: if ``net`` is a transition graph and ``size`` is not ``None`` :raises ValueError: if ``net`` is not fixed sized and ``size`` is ``None`` """ if isinstance(net, nx.DiGraph): - for attr in nx.simple_cycles(net): - yield attr + return list(nx.simple_cycles(net)) elif not is_network(net): raise TypeError("net must be a network or a networkx DiGraph") elif is_fixed_sized(net) and size is not None: @@ -188,6 +192,7 @@ def attractors(net, size=None): elif not is_fixed_sized(net) and size is None: raise ValueError("variable sized networks require a size") else: + cycles = [] # Get the state transitions # (array of next state indexed by current state) trans = list(transitions(net, size=size, encode=True)) @@ -259,7 +264,8 @@ def attractors(net, size=None): # Yield the cycle if we found one if len(cycle) != 0: - yield cycle + cycles.append(cycle) + return cycles def basins(net, size=None): """
Death to Generators Generators are great when you may have to iterate over computations that are too large to store in memory. This is certainly a possibility for much of Neet's algorithms. It was for this reason that @dglmoore over-engineered Neet's functions to `yield` instead of `return` where possible. While it may seem like this is a good thing, it is really just cumbersome for users, implementers and maintainers. The problem it aimed to solve is a non-issue as most of the time there are computational limits other than memory that come into play around the same time as memory issues. We should refactor to use lists in place of generators unless it makes sense to do otherwise.
ELIFE-ASU/Neet
diff --git a/test/test_synchronous.py b/test/test_synchronous.py index ec1bb85..eed14a2 100644 --- a/test/test_synchronous.py +++ b/test/test_synchronous.py @@ -20,13 +20,13 @@ class TestSynchronous(unittest.TestCase): ``trajectory`` should raise a type error if ``net`` is not a network """ with self.assertRaises(TypeError): - list(trajectory(5, [1, 2, 3])) + trajectory(5, [1, 2, 3]) with self.assertRaises(TypeError): - list(trajectory(MockObject(), [1, 2, 3])) + trajectory(MockObject(), [1, 2, 3]) with self.assertRaises(TypeError): - list(trajectory(MockFixedSizedNetwork, [1, 2, 3])) + trajectory(MockFixedSizedNetwork, [1, 2, 3]) def test_trajectory_too_short(self): """ @@ -34,10 +34,10 @@ class TestSynchronous(unittest.TestCase): than 1 """ with self.assertRaises(ValueError): - list(trajectory(MockFixedSizedNetwork(), [1, 2, 3], timesteps=0)) + trajectory(MockFixedSizedNetwork(), [1, 2, 3], timesteps=0) with self.assertRaises(ValueError): - list(trajectory(MockFixedSizedNetwork(), [1, 2, 3], timesteps=-1)) + trajectory(MockFixedSizedNetwork(), [1, 2, 3], timesteps=-1) def test_trajectory_eca(self): """ @@ -45,14 +45,14 @@ class TestSynchronous(unittest.TestCase): """ rule30 = ECA(30) with self.assertRaises(ValueError): - list(trajectory(rule30, [])) + trajectory(rule30, []) xs = [0, 1, 0] - got = list(trajectory(rule30, xs)) + got = trajectory(rule30, xs) self.assertEqual([0, 1, 0], xs) self.assertEqual([[0, 1, 0], [1, 1, 1]], got) - got = list(trajectory(rule30, xs, timesteps=2)) + got = trajectory(rule30, xs, timesteps=2) self.assertEqual([0, 1, 0], xs) self.assertEqual([[0, 1, 0], [1, 1, 1], [0, 0, 0]], got) @@ -62,14 +62,14 @@ class TestSynchronous(unittest.TestCase): """ rule30 = ECA(30) with self.assertRaises(ValueError): - list(trajectory(rule30, [], encode=True)) + trajectory(rule30, [], encode=True) state = [0, 1, 0] - got = list(trajectory(rule30, state, encode=True)) + got = trajectory(rule30, state, encode=True) self.assertEqual([0, 1, 0], state) 
self.assertEqual([2, 7], got) - got = list(trajectory(rule30, state, timesteps=2, encode=True)) + got = trajectory(rule30, state, timesteps=2, encode=True) self.assertEqual([0, 1, 0], state) self.assertEqual([2, 7, 0], got) @@ -84,11 +84,11 @@ class TestSynchronous(unittest.TestCase): ) state = [0, 0] - got = list(trajectory(net, state)) + got = trajectory(net, state) self.assertEqual([0, 0], state) self.assertEqual([[0, 0], [0, 1]], got) - got = list(trajectory(net, state, timesteps=3)) + got = trajectory(net, state, timesteps=3) self.assertEqual([0, 0], state) self.assertEqual([[0, 0], [0, 1], [0, 1], [0, 1]], got) @@ -103,11 +103,11 @@ class TestSynchronous(unittest.TestCase): ) state = [0, 0] - got = list(trajectory(net, state, encode=True)) + got = trajectory(net, state, encode=True) self.assertEqual([0, 0], state) self.assertEqual([0, 2], got) - got = list(trajectory(net, state, timesteps=3, encode=True)) + got = trajectory(net, state, timesteps=3, encode=True) self.assertEqual([0, 0], state) self.assertEqual([0, 2, 2, 2], got) @@ -119,7 +119,7 @@ class TestSynchronous(unittest.TestCase): ((0, 2), {'01', '10', '11'}), ((0, 1), {'11'})]) state = [0, 1, 0] - got = list(trajectory(net, state, 3)) + got = trajectory(net, state, 3) self.assertEqual([[0, 1, 0], [1, 0, 0], [0, 1, 0], [1, 0, 0]], got) self.assertEqual([0, 1, 0], state) @@ -131,7 +131,7 @@ class TestSynchronous(unittest.TestCase): ``transitions`` should raise a type error if ``net`` is not a network """ with self.assertRaises(TypeError): - list(transitions(MockObject(), 5)) + transitions(MockObject(), 5) def test_transitions_not_fixed_sized(self): """ @@ -139,7 +139,7 @@ class TestSynchronous(unittest.TestCase): and ``size`` is ``None`` """ with self.assertRaises(ValueError): - list(transitions(ECA(30), size=None)) + transitions(ECA(30), size=None) def test_transitions_fixed_sized(self): """ @@ -147,7 +147,7 @@ class TestSynchronous(unittest.TestCase): ``size`` is not ``None`` """ with 
self.assertRaises(ValueError): - list(transitions(MockFixedSizedNetwork, size=3)) + transitions(MockFixedSizedNetwork, size=3) def test_transitions_eca(self): """ @@ -155,13 +155,13 @@ class TestSynchronous(unittest.TestCase): """ rule30 = ECA(30) - got = list(transitions(rule30, size=1)) + got = transitions(rule30, size=1) self.assertEqual([[0], [0]], got) - got = list(transitions(rule30, size=2)) + got = transitions(rule30, size=2) self.assertEqual([[0, 0], [1, 0], [0, 1], [0, 0]], got) - got = list(transitions(rule30, size=3)) + got = transitions(rule30, size=3) self.assertEqual([[0, 0, 0], [1, 1, 1], [1, 1, 1], [1, 0, 0], [1, 1, 1], [0, 0, 1], [0, 1, 0], [0, 0, 0]], got) @@ -171,13 +171,13 @@ class TestSynchronous(unittest.TestCase): """ rule30 = ECA(30) - got = list(transitions(rule30, size=1, encode=True)) + got = transitions(rule30, size=1, encode=True) self.assertEqual([0, 0], got) - got = list(transitions(rule30, size=2, encode=True)) + got = transitions(rule30, size=2, encode=True) self.assertEqual([0, 1, 2, 0], got) - got = list(transitions(rule30, size=3, encode=True)) + got = transitions(rule30, size=3, encode=True) self.assertEqual([0, 7, 7, 1, 7, 4, 2, 0], got) def test_transitions_wtnetwork(self): @@ -190,7 +190,7 @@ class TestSynchronous(unittest.TestCase): theta=WTNetwork.positive_threshold ) - got = list(transitions(net)) + got = transitions(net) self.assertEqual([[0, 1], [1, 0], [0, 1], [1, 1]], got) def test_transitions_wtnetwork_encoded(self): @@ -203,7 +203,7 @@ class TestSynchronous(unittest.TestCase): theta=WTNetwork.positive_threshold ) - got = list(transitions(net, encode=True)) + got = transitions(net, encode=True) self.assertEqual([2, 1, 2, 3], got) def test_transitions_logicnetwork(self): @@ -211,7 +211,7 @@ class TestSynchronous(unittest.TestCase): test `transitions` on `LogicNetwork`s """ net = LogicNetwork([((1,), {'0', '1'}), ((0,), {'1'})]) - got = list(transitions(net)) + got = transitions(net) self.assertEqual([[1, 0], [1, 1], 
[1, 0], [1, 1]], got) def test_transitions_logicnetwork_encoded(self): @@ -219,7 +219,7 @@ class TestSynchronous(unittest.TestCase): test `transitions` on `LogicNetwork`s, states encoded """ net = LogicNetwork([((1,), {'0', '1'}), ((0,), {'1'})]) - got = list(transitions(net, encode=True)) + got = transitions(net, encode=True) self.assertEqual([1, 3, 1, 3], got) def test_transition_graph_not_network(self): @@ -268,13 +268,13 @@ class TestSynchronous(unittest.TestCase): nor a networkx digraph """ with self.assertRaises(TypeError): - list(attractors('blah')) + attractors('blah') with self.assertRaises(TypeError): - list(attractors(MockObject())) + attractors(MockObject()) with self.assertRaises(TypeError): - list(attractors(nx.Graph())) + attractors(nx.Graph()) def test_attractors_variable_sized(self): """ @@ -282,7 +282,7 @@ class TestSynchronous(unittest.TestCase): network and ``size`` is ``None`` """ with self.assertRaises(ValueError): - list(attractors(ECA(30), size=None)) + attractors(ECA(30), size=None) def test_attractors_fixed_sized(self): """ @@ -290,10 +290,10 @@ class TestSynchronous(unittest.TestCase): network or a networkx digraph, and ``size`` is not ``None`` """ with self.assertRaises(ValueError): - list(attractors(MockFixedSizedNetwork(), size=5)) + attractors(MockFixedSizedNetwork(), size=5) # with self.assertRaises(ValueError): - # list(attractors(nx.DiGraph(), size=5)) + # attractors(nx.DiGraph(), size=5) def test_attractors_eca(self): """ @@ -304,7 +304,7 @@ class TestSynchronous(unittest.TestCase): (ECA(110), 3, 1), (ECA(110), 4, 3), (ECA(110), 5, 1), (ECA(110), 6, 3)] for rule, width, size in networks: - self.assertEqual(size, len(list(attractors(rule, width)))) + self.assertEqual(size, len(attractors(rule, width))) def test_attractors_wtnetworks(self): """ @@ -312,14 +312,14 @@ class TestSynchronous(unittest.TestCase): """ networks = [(s_pombe, 13), (s_cerevisiae, 7), (c_elegans, 5)] for net, size in networks: - self.assertEqual(size, 
len(list(attractors(net)))) + self.assertEqual(size, len(attractors(net))) def test_attractors_transition_graph(self): """ test ``attractors`` on ``s_pombe`` transition graph """ - att_from_graph = list(attractors(transition_graph(s_pombe))) - att_from_network = list(attractors(s_pombe)) + att_from_graph = attractors(transition_graph(s_pombe)) + att_from_network = attractors(s_pombe) for (a, b) in zip(att_from_graph, att_from_network): a.sort()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 decorator==4.4.2 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/ELIFE-ASU/Neet.git@5a45a92c1b3a564290487ecc1483b4866e26eaf5#egg=neet networkx==2.5.1 nose==1.3.7 numpy==1.19.5 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyinform==0.2.0 pyparsing==3.1.4 pytest==7.0.1 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: Neet channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - decorator==4.4.2 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - networkx==2.5.1 - nose==1.3.7 - numpy==1.19.5 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyinform==0.2.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/Neet
[ "test/test_synchronous.py::TestSynchronous::test_attractors_eca", "test/test_synchronous.py::TestSynchronous::test_attractors_fixed_sized", "test/test_synchronous.py::TestSynchronous::test_attractors_invalid_net", "test/test_synchronous.py::TestSynchronous::test_attractors_transition_graph", "test/test_synchronous.py::TestSynchronous::test_attractors_variable_sized", "test/test_synchronous.py::TestSynchronous::test_attractors_wtnetworks", "test/test_synchronous.py::TestSynchronous::test_trajectory_eca", "test/test_synchronous.py::TestSynchronous::test_trajectory_eca_encoded", "test/test_synchronous.py::TestSynchronous::test_trajectory_logicnetwork", "test/test_synchronous.py::TestSynchronous::test_trajectory_not_network", "test/test_synchronous.py::TestSynchronous::test_trajectory_too_short", "test/test_synchronous.py::TestSynchronous::test_trajectory_wtnetwork", "test/test_synchronous.py::TestSynchronous::test_trajectory_wtnetwork_encoded", "test/test_synchronous.py::TestSynchronous::test_transitions_eca", "test/test_synchronous.py::TestSynchronous::test_transitions_eca_encoded", "test/test_synchronous.py::TestSynchronous::test_transitions_fixed_sized", "test/test_synchronous.py::TestSynchronous::test_transitions_logicnetwork", "test/test_synchronous.py::TestSynchronous::test_transitions_logicnetwork_encoded", "test/test_synchronous.py::TestSynchronous::test_transitions_not_fixed_sized", "test/test_synchronous.py::TestSynchronous::test_transitions_not_network", "test/test_synchronous.py::TestSynchronous::test_transitions_wtnetwork", "test/test_synchronous.py::TestSynchronous::test_transitions_wtnetwork_encoded" ]
[ "test/test_synchronous.py::TestSynchronous::test_basin_entropy_eca", "test/test_synchronous.py::TestSynchronous::test_basin_entropy_transition_graph", "test/test_synchronous.py::TestSynchronous::test_basin_entropy_wtnetwork", "test/test_synchronous.py::TestSynchronous::test_basin_entropy_wtnetwork_base10", "test/test_synchronous.py::TestSynchronous::test_basins_eca", "test/test_synchronous.py::TestSynchronous::test_basins_transition_graph", "test/test_synchronous.py::TestSynchronous::test_basins_wtnetwork" ]
[ "test/test_synchronous.py::TestSynchronous::test_basin_entropy_fixed_sized", "test/test_synchronous.py::TestSynchronous::test_basin_entropy_invalid_net", "test/test_synchronous.py::TestSynchronous::test_basin_entropy_variable_sized", "test/test_synchronous.py::TestSynchronous::test_basins_fixed_sized", "test/test_synchronous.py::TestSynchronous::test_basins_invalid_net", "test/test_synchronous.py::TestSynchronous::test_basins_variable_sized", "test/test_synchronous.py::TestSynchronous::test_timeseries_eca", "test/test_synchronous.py::TestSynchronous::test_timeseries_fixed_sized", "test/test_synchronous.py::TestSynchronous::test_timeseries_not_network", "test/test_synchronous.py::TestSynchronous::test_timeseries_too_short", "test/test_synchronous.py::TestSynchronous::test_timeseries_variable_sized", "test/test_synchronous.py::TestSynchronous::test_timeseries_wtnetworks", "test/test_synchronous.py::TestSynchronous::test_transition_graph_eca", "test/test_synchronous.py::TestSynchronous::test_transition_graph_fixed_sized", "test/test_synchronous.py::TestSynchronous::test_transition_graph_not_network", "test/test_synchronous.py::TestSynchronous::test_transition_graph_s_pombe", "test/test_synchronous.py::TestSynchronous::test_transition_graph_variable_sized", "test/test_synchronous.py::TestLandscape::test_attractor_lengths", "test/test_synchronous.py::TestLandscape::test_attractors_eca", "test/test_synchronous.py::TestLandscape::test_attractors_wtnetworks", "test/test_synchronous.py::TestLandscape::test_basin_entropy_eca", "test/test_synchronous.py::TestLandscape::test_basin_entropy_wtnetwork", "test/test_synchronous.py::TestLandscape::test_basin_sizes", "test/test_synchronous.py::TestLandscape::test_basins_eca", "test/test_synchronous.py::TestLandscape::test_canary", "test/test_synchronous.py::TestLandscape::test_graph_eca", "test/test_synchronous.py::TestLandscape::test_graph_wtnetworks", "test/test_synchronous.py::TestLandscape::test_heights", 
"test/test_synchronous.py::TestLandscape::test_in_degree", "test/test_synchronous.py::TestLandscape::test_init_fixed_sized", "test/test_synchronous.py::TestLandscape::test_init_not_fixed_sized", "test/test_synchronous.py::TestLandscape::test_init_not_network", "test/test_synchronous.py::TestLandscape::test_is_state_space", "test/test_synchronous.py::TestLandscape::test_recurrence_times", "test/test_synchronous.py::TestLandscape::test_timeseries_eca", "test/test_synchronous.py::TestLandscape::test_timeseries_too_short", "test/test_synchronous.py::TestLandscape::test_timeseries_wtnetworks", "test/test_synchronous.py::TestLandscape::test_trajectory_eca", "test/test_synchronous.py::TestLandscape::test_trajectory_logicnetwork", "test/test_synchronous.py::TestLandscape::test_trajectory_too_short", "test/test_synchronous.py::TestLandscape::test_trajectory_wtnetwork", "test/test_synchronous.py::TestLandscape::test_transitions_eca", "test/test_synchronous.py::TestLandscape::test_transitions_logicnetwork", "test/test_synchronous.py::TestLandscape::test_transitions_spombe", "test/test_synchronous.py::TestLandscape::test_transitions_wtnetwork" ]
[]
MIT License
2,755
[ "neet/synchronous.py" ]
[ "neet/synchronous.py" ]
PlasmaPy__PlasmaPy-510
d22b5a2d70a10c0e8e145c096cc691450c6a0f05
2018-07-10 07:31:43
24113f1659d809930288374f6b1f95dc573aff47
diff --git a/plasmapy/atomic/particle_class.py b/plasmapy/atomic/particle_class.py index 0a5d2c92..b7e7ed5c 100644 --- a/plasmapy/atomic/particle_class.py +++ b/plasmapy/atomic/particle_class.py @@ -658,6 +658,42 @@ def element_name(self) -> str: raise InvalidElementError(_category_errmsg(self, 'element')) return self._attributes['element name'] + @property + def isotope_name(self) -> str: + """ + Return the name of the element along with the isotope + symbol if the particle corresponds to an isotope, and + `None` otherwise. + + If the particle is not a valid element, then this + attribute will raise an `~plasmapy.utils.InvalidElementError`. + If it is not an isotope, then this attribute will raise an + `~plasmapy.utils.InvalidIsotopeError`. + + Examples + -------- + >>> deuterium = Particle("D") + >>> deuterium.isotope_name + 'deuterium' + >>> iron_isotope = Particle("Fe-56", Z=16) + >>> iron_isotope.isotope_name + 'iron-56' + + """ + if not self.element: + raise InvalidElementError(_category_errmsg(self.particle, 'element')) + elif not self.isotope: + raise InvalidIsotopeError(_category_errmsg(self, 'isotope')) + + if self.isotope == "D": + isotope_name = "deuterium" + elif self.isotope == "T": + isotope_name = "tritium" + else: + isotope_name = f"{self.element_name}-{self.mass_number}" + + return isotope_name + @property def integer_charge(self) -> int: """
Add an `isotope_name` attribute on `Particle` class > We could definitely add an `isotope_name` attribute as well, which for isotopes in general would return something like `'iodine-131'` plus `'deuterium'` and `'tritium'` for those special cases. The `element` and `element_name` attributes just return the name of the element without respect to the isotope. I'll try to remember to create a separate issue for this. [as mentioned here](https://github.com/PlasmaPy/PlasmaPy/pull/468/files#r189747292) by @namurphy.
PlasmaPy/PlasmaPy
diff --git a/plasmapy/atomic/tests/test_particle_class.py b/plasmapy/atomic/tests/test_particle_class.py index 17558a58..9d3308e0 100644 --- a/plasmapy/atomic/tests/test_particle_class.py +++ b/plasmapy/atomic/tests/test_particle_class.py @@ -31,6 +31,7 @@ {'particle': 'n', 'element': None, 'isotope': None, + 'isotope_name': InvalidElementError, 'ionic_symbol': None, 'roman_symbol': None, 'is_ion': False, @@ -51,6 +52,7 @@ 'element': 'H', 'element_name': 'hydrogen', 'isotope': 'H-1', + 'isotope_name': 'hydrogen-1', 'ionic_symbol': 'p+', 'roman_symbol': 'H-1 II', 'is_ion': True, @@ -85,6 +87,7 @@ 'element': None, 'element_name': InvalidElementError, 'isotope': None, + 'isotope_name': InvalidElementError, 'ionic_symbol': None, 'roman_symbol': None, 'is_ion': False, @@ -106,6 +109,7 @@ 'element': None, 'element_name': InvalidElementError, 'isotope': None, + 'isotope_name': InvalidElementError, 'ionic_symbol': None, 'roman_symbol': None, 'is_ion': False, @@ -129,6 +133,7 @@ {'particle': 'e+', 'element': None, 'isotope': None, + 'isotope_name': InvalidElementError, 'ionic_symbol': None, 'roman_symbol': None, 'is_ion': False, @@ -156,6 +161,7 @@ {'particle': 'H', 'element': 'H', 'isotope': None, + 'isotope_name': InvalidIsotopeError, 'ionic_symbol': None, 'roman_symbol': ChargeError, 'is_ion': False, @@ -177,6 +183,7 @@ {'particle': 'H 1-', 'element': 'H', 'isotope': None, + 'isotope_name': InvalidIsotopeError, 'ionic_symbol': 'H 1-', 'roman_symbol': roman.OutOfRangeError, 'is_ion': True, @@ -196,6 +203,7 @@ 'particle': 'H-1 0+', 'element': 'H', 'isotope': 'H-1', + 'isotope_name': 'hydrogen-1', 'ionic_symbol': 'H-1 0+', 'roman_symbol': 'H-1 I', 'is_ion': False, @@ -218,6 +226,7 @@ 'element': 'H', 'element_name': 'hydrogen', 'isotope': 'D', + 'isotope_name': 'deuterium', 'ionic_symbol': 'D 1+', 'roman_symbol': 'D II', 'is_ion': True, @@ -238,6 +247,7 @@ {'particle': 'T 1+', 'element': 'H', 'isotope': 'T', + 'isotope_name': 'tritium', 'ionic_symbol': 'T 1+', 
'roman_symbol': 'T II', 'is_ion': True, @@ -257,6 +267,7 @@ 'element': 'Fe', 'element_name': 'iron', 'isotope': 'Fe-56', + 'isotope_name': 'iron-56', 'ionic_symbol': 'Fe-56 17+', 'roman_symbol': 'Fe-56 XVIII', 'is_electron': False, @@ -277,6 +288,7 @@ 'element': 'He', 'element_name': 'helium', 'isotope': 'He-4', + 'isotope_name': 'helium-4', 'ionic_symbol': 'He-4 2+', 'roman_symbol': 'He-4 III', 'is_ion': True, @@ -293,6 +305,7 @@ 'element': 'Li', 'element_name': 'lithium', 'isotope': 'Li-7', + 'isotope_name': 'lithium-7', 'ionic_symbol': None, 'roman_symbol': ChargeError, 'is_ion': False, @@ -309,6 +322,7 @@ {'particle': 'Cn-276 22+', 'element': 'Cn', 'isotope': 'Cn-276', + 'isotope_name': 'copernicium-276', 'ionic_symbol': 'Cn-276 22+', 'roman_symbol': 'Cn-276 XXIII', 'is_ion': True, @@ -324,6 +338,7 @@ {'particle': 'mu-', 'element': None, 'isotope': None, + 'isotope_name': InvalidElementError, 'ionic_symbol': None, 'roman_symbol': None, 'is_ion': False, @@ -338,6 +353,7 @@ {'particle': 'nu_tau', 'element': None, 'isotope': None, + 'isotope_name': InvalidElementError, 'mass': MissingAtomicDataError, 'integer_charge': 0, 'mass_number': InvalidIsotopeError,
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[optional]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asteval==0.9.31 astropy==4.3.1 certifi @ file:///croot/certifi_1671487769961/work/certifi colorama==0.4.6 cycler==0.11.0 Cython==3.0.12 exceptiongroup==1.2.2 fonttools==4.38.0 future==1.0.0 h5py==3.8.0 importlib-metadata==6.7.0 iniconfig==2.0.0 kiwisolver==1.4.5 lmfit==1.2.2 matplotlib==3.5.3 mpmath==1.3.0 numpy==1.21.6 packaging==24.0 Pillow==9.5.0 -e git+https://github.com/PlasmaPy/PlasmaPy.git@d22b5a2d70a10c0e8e145c096cc691450c6a0f05#egg=plasmapy pluggy==1.2.0 pyerfa==2.0.0.3 pyparsing==3.1.4 pytest==7.4.4 python-dateutil==2.9.0.post0 roman==4.2 scipy==1.7.3 six==1.17.0 tomli==2.0.1 typing_extensions==4.7.1 uncertainties==3.1.7 zipp==3.15.0
name: PlasmaPy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asteval==0.9.31 - astropy==4.3.1 - colorama==0.4.6 - cycler==0.11.0 - cython==3.0.12 - exceptiongroup==1.2.2 - fonttools==4.38.0 - future==1.0.0 - h5py==3.8.0 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - kiwisolver==1.4.5 - lmfit==1.2.2 - matplotlib==3.5.3 - mpmath==1.3.0 - numpy==1.21.6 - packaging==24.0 - pillow==9.5.0 - pluggy==1.2.0 - pyerfa==2.0.0.3 - pyparsing==3.1.4 - pytest==7.4.4 - python-dateutil==2.9.0.post0 - roman==4.2 - scipy==1.7.3 - six==1.17.0 - tomli==2.0.1 - typing-extensions==4.7.1 - uncertainties==3.1.7 - zipp==3.15.0 prefix: /opt/conda/envs/PlasmaPy
[ "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[neutron-kwargs0-expected_dict0]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[p+-kwargs1-expected_dict1]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[p--kwargs2-expected_dict2]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[e--kwargs3-expected_dict3]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[e+-kwargs4-expected_dict4]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[H-kwargs5-expected_dict5]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[H", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[H-1", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[D+-kwargs8-expected_dict8]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[tritium-kwargs9-expected_dict9]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Fe-kwargs10-expected_dict10]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[alpha-kwargs11-expected_dict11]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Cn-276-kwargs13-expected_dict13]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[muon-kwargs14-expected_dict14]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[nu_tau-kwargs15-expected_dict15]" ]
[ "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Li-kwargs12-expected_dict12]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[a-kwargs0--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[d+-kwargs1--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[H-kwargs2--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-818-kwargs3--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-12-kwargs4--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-kwargs5--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-kwargs6--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs7--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs8-.atomic_number-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[alpha-kwargs9-.standard_atomic_weight-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Fe-56-kwargs10-.standard_atomic_weight-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs11-.standard_atomic_weight-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[tau--kwargs12-.element_name-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[tau+-kwargs13-.atomic_number-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[neutron-kwargs14-.atomic_number-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[H-kwargs15-.mass_number-InvalidIsotopeError]", 
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[neutron-kwargs16-.mass_number-InvalidIsotopeError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[He-kwargs17-.charge-ChargeError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[He-kwargs18-.integer_charge-ChargeError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Fe-kwargs19-.spin-MissingAtomicDataError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[nu_e-kwargs20-.mass-MissingAtomicDataError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Og-kwargs21-.standard_atomic_weight-MissingAtomicDataError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[arg22-kwargs22--TypeError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[H-----kwargs0--AtomicWarning]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[alpha-kwargs1--AtomicWarning]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[alpha-kwargs2--AtomicWarning]" ]
[ "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles0]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles1]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles2]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles3]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles4]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles5]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles6]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles7]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles8]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles9]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles10]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_cmp", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[n-neutron]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[p+-proton]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[H-1-p+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[H-1", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[D-D+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[T-T+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[He-4-alpha]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[Fe-56-Fe-56", 
"plasmapy/atomic/tests/test_particle_class.py::test_particle_half_life_string", "plasmapy/atomic/tests/test_particle_class.py::test_particle_is_electron[Particle(\"e-\")-True]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_is_electron[Particle(\"p+\")-False]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_bool_error", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[p+-p-]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[n-antineutron]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[e--e+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[mu--mu+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[tau--tau+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_e-anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_mu-anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_tau-anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[p+-p-]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[n-antineutron]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[e--e+]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[mu--mu+]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[tau--tau+]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_e-anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_mu-anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_tau-anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::test_unary_operator_for_elements", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[e+]", 
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[e-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[p-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[n]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[antineutron]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[p+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[mu-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[mu+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[tau-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[tau+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[e+]", 
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[e-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[p-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[n]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[antineutron]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[p+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[mu-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[mu+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[tau-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[tau+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[e+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[e-]", 
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[p-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[n]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[antineutron]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[p+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[mu-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[mu+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[tau-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[tau+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[e+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[e-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_e]", 
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[p-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[n]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[antineutron]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[p+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[mu-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[mu+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[tau-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[tau+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_e]" ]
[]
BSD 3-Clause "New" or "Revised" License
2,756
[ "plasmapy/atomic/particle_class.py" ]
[ "plasmapy/atomic/particle_class.py" ]
conan-io__conan-3187
c3baafb780b6e5498f8bd460426901d9d5ab10e1
2018-07-10 10:30:23
f59b0d5773ca17e222236b1b6b55785f03539216
diff --git a/.ci/jenkins/conf.py b/.ci/jenkins/conf.py index f59d4441e..02c9bc118 100644 --- a/.ci/jenkins/conf.py +++ b/.ci/jenkins/conf.py @@ -9,7 +9,7 @@ winpylocation = {"py27": "C:\\Python27\\python.exe", macpylocation = {"py27": "/usr/bin/python", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python", "py34": "/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python", - "py36": "/Users/jenkins_ci/.pyenv/versions/3.6.3/bin/python"} + "py36": "/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python"} linuxpylocation = {"py27": "/usr/bin/python2.7", "py34": "/usr/bin/python3.4", diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py index 1b91d7a0e..37c7597bd 100644 --- a/conans/client/generators/visualstudio.py +++ b/conans/client/generators/visualstudio.py @@ -1,5 +1,8 @@ +import os + from conans.model import Generator from conans.paths import BUILD_INFO_VISUAL_STUDIO +import re class VisualStudioGenerator(Generator): @@ -76,4 +79,8 @@ class VisualStudioGenerator(Generator): 'exe_flags': " ".join(self._deps_build_info.exelinkflags) } formatted_template = self.template.format(**fields) + userprofile = os.getenv("USERPROFILE") + if userprofile: + userprofile = userprofile.replace("\\", "/") + formatted_template = re.sub(userprofile, "$(USERPROFILE)", formatted_template, flags=re.I) return formatted_template diff --git a/conans/client/remote_manager.py b/conans/client/remote_manager.py index bd37186bf..929174655 100644 --- a/conans/client/remote_manager.py +++ b/conans/client/remote_manager.py @@ -12,7 +12,7 @@ from conans.model.manifest import gather_files from conans.paths import PACKAGE_TGZ_NAME, CONANINFO, CONAN_MANIFEST, CONANFILE, EXPORT_TGZ_NAME, \ rm_conandir, EXPORT_SOURCES_TGZ_NAME, EXPORT_SOURCES_DIR_OLD from conans.util.files import gzopen_without_timestamps, is_dirty,\ - make_read_only + make_read_only, set_dirty, clean_dirty from conans.util.files import tar_extract, rmdir, exception_message_safe, mkdir from 
conans.util.files import touch_folder from conans.util.log import logger @@ -23,6 +23,7 @@ from conans.util.tracer import (log_package_upload, log_recipe_upload, log_package_download) from conans.client.source import merge_directories from conans.util.env_reader import get_env +from conans.search.search import filter_packages class RemoteManager(object): @@ -39,6 +40,14 @@ class RemoteManager(object): t1 = time.time() export_folder = self._client_cache.export(conan_reference) + + for f in (EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME): + tgz_path = os.path.join(export_folder, f) + if is_dirty(tgz_path): + self._output.warn("%s: Removing %s, marked as dirty" % (str(conan_reference), f)) + os.remove(tgz_path) + clean_dirty(tgz_path) + files, symlinks = gather_files(export_folder) if CONANFILE not in files or CONAN_MANIFEST not in files: raise ConanException("Cannot upload corrupted recipe '%s'" % str(conan_reference)) @@ -99,6 +108,11 @@ class RemoteManager(object): "Remove it with 'conan remove %s -p=%s'" % (package_reference, package_reference.conan, package_reference.package_id)) + tgz_path = os.path.join(package_folder, PACKAGE_TGZ_NAME) + if is_dirty(tgz_path): + self._output.warn("%s: Removing %s, marked as dirty" % (str(package_reference), PACKAGE_TGZ_NAME)) + os.remove(tgz_path) + clean_dirty(tgz_path) # Get all the files in that directory files, symlinks = gather_files(package_folder) @@ -188,7 +202,8 @@ class RemoteManager(object): t1 = time.time() def filter_function(urls): - file_url = urls.get(EXPORT_SOURCES_TGZ_NAME) + file_url = urls.pop(EXPORT_SOURCES_TGZ_NAME, None) + check_compressed_files(EXPORT_SOURCES_TGZ_NAME, urls) if file_url: urls = {EXPORT_SOURCES_TGZ_NAME: file_url} else: @@ -254,7 +269,9 @@ class RemoteManager(object): return self._call_remote(remote, "search", pattern, ignorecase) def search_packages(self, remote, reference, query): - return self._call_remote(remote, "search_packages", reference, query) + packages = self._call_remote(remote, 
"search_packages", reference, query) + packages = filter_packages(query, packages) + return packages def remove(self, conan_ref, remote): """ @@ -324,11 +341,10 @@ def compress_package_files(files, symlinks, dest_folder, output): def compress_files(files, symlinks, name, dest_dir): - """Compress the package and returns the new dict (name => content) of files, - only with the conanXX files and the compressed file""" t1 = time.time() # FIXME, better write to disk sequentially and not keep tgz contents in memory tgz_path = os.path.join(dest_dir, name) + set_dirty(tgz_path) with open(tgz_path, "wb") as tgz_handle: # tgz_contents = BytesIO() tgz = gzopen_without_timestamps(name, mode="w", fileobj=tgz_handle) @@ -354,17 +370,27 @@ def compress_files(files, symlinks, name, dest_dir): tgz.close() + clean_dirty(tgz_path) duration = time.time() - t1 log_compressed_files(files, duration, tgz_path) return tgz_path +def check_compressed_files(tgz_name, files): + bare_name = os.path.splitext(tgz_name)[0] + for f in files: + if bare_name == os.path.splitext(f)[0]: + raise ConanException("This Conan version is not prepared to handle '%s' file format. " + "Please upgrade conan client." 
% f) + + def unzip_and_get_files(files, destination_dir, tgz_name): """Moves all files from package_files, {relative_name: tmp_abs_path} to destination_dir, unzipping the "tgz_name" if found""" tgz_file = files.pop(tgz_name, None) + check_compressed_files(tgz_name, files) if tgz_file: uncompress_file(tgz_file, destination_dir) os.remove(tgz_file) diff --git a/conans/client/runner.py b/conans/client/runner.py index a598e9721..a71a8f942 100644 --- a/conans/client/runner.py +++ b/conans/client/runner.py @@ -1,6 +1,7 @@ import io import os import sys +from contextlib import contextmanager from subprocess import Popen, PIPE, STDOUT from conans.util.files import decode_text from conans.errors import ConanException @@ -37,18 +38,19 @@ class ConanRunner(object): if self._print_commands_to_output and stream_output and self._log_run_to_output: stream_output.write(call_message) - # No output has to be redirected to logs or buffer or omitted - if output is True and not log_filepath and self._log_run_to_output and not subprocess: - return self._simple_os_call(command, cwd) - elif log_filepath: - if stream_output: - stream_output.write("Logging command output to file '%s'\n" % log_filepath) - with open(log_filepath, "a+") as log_handler: - if self._print_commands_to_output: - log_handler.write(call_message) - return self._pipe_os_call(command, stream_output, log_handler, cwd) - else: - return self._pipe_os_call(command, stream_output, None, cwd) + with pyinstaller_bundle_env_cleaned(): + # No output has to be redirected to logs or buffer or omitted + if output is True and not log_filepath and self._log_run_to_output and not subprocess: + return self._simple_os_call(command, cwd) + elif log_filepath: + if stream_output: + stream_output.write("Logging command output to file '%s'\n" % log_filepath) + with open(log_filepath, "a+") as log_handler: + if self._print_commands_to_output: + log_handler.write(call_message) + return self._pipe_os_call(command, stream_output, log_handler, 
cwd) + else: + return self._pipe_os_call(command, stream_output, None, cwd) def _pipe_os_call(self, command, stream_output, log_handler, cwd): @@ -69,7 +71,7 @@ class ConanRunner(object): if stream_output and self._log_run_to_output: try: stream_output.write(decoded_line) - except UnicodeEncodeError: # be agressive on text encoding + except UnicodeEncodeError: # be aggressive on text encoding decoded_line = decoded_line.encode("latin-1", "ignore").decode("latin-1", "ignore") stream_output.write(decoded_line) @@ -100,3 +102,27 @@ class ConanRunner(object): finally: os.chdir(old_dir) return result + + +if getattr(sys, 'frozen', False) and 'LD_LIBRARY_PATH' in os.environ: + + # http://pyinstaller.readthedocs.io/en/stable/runtime-information.html#ld-library-path-libpath-considerations + pyinstaller_bundle_dir = os.environ['LD_LIBRARY_PATH'].replace( + os.environ.get('LD_LIBRARY_PATH_ORIG', ''), '' + ).strip(';:') + + @contextmanager + def pyinstaller_bundle_env_cleaned(): + """Removes the pyinstaller bundle directory from LD_LIBRARY_PATH + + :return: None + """ + ld_library_path = os.environ['LD_LIBRARY_PATH'] + os.environ['LD_LIBRARY_PATH'] = ld_library_path.replace(pyinstaller_bundle_dir, '') + yield + os.environ['LD_LIBRARY_PATH'] = ld_library_path + +else: + @contextmanager + def pyinstaller_bundle_env_cleaned(): + yield diff --git a/conans/client/tools/files.py b/conans/client/tools/files.py index 5d6ddef97..0d42fe368 100644 --- a/conans/client/tools/files.py +++ b/conans/client/tools/files.py @@ -11,6 +11,7 @@ from conans.client.output import ConanOutput from conans.errors import ConanException from conans.util.files import (load, save, _generic_algorithm_sum) from conans.unicode import get_cwd +import six _global_output = None @@ -68,6 +69,11 @@ def unzip(filename, destination=".", keep_permissions=False, pattern=None): filename.endswith(".tbz2") or filename.endswith(".tar.bz2") or filename.endswith(".tar")): return untargz(filename, destination, pattern) + if 
filename.endswith(".tar.xz") or filename.endswith(".txz"): + if six.PY2: + raise ConanException("XZ format not supported in Python 2. Use Python 3 instead") + return untargz(filename, destination, pattern) + import zipfile full_path = os.path.normpath(os.path.join(get_cwd(), destination)) diff --git a/conans/client/tools/net.py b/conans/client/tools/net.py index 235fc69b5..59293e293 100644 --- a/conans/client/tools/net.py +++ b/conans/client/tools/net.py @@ -9,10 +9,13 @@ from conans.errors import ConanException _global_requester = None -def get(url, md5='', sha1='', sha256='', destination="."): +def get(url, md5='', sha1='', sha256='', destination=".", filename=""): """ high level downloader + unzipper + (optional hash checker) + delete temporary zip """ - filename = os.path.basename(url) + if not filename and ("?" in url or "=" in url): + raise ConanException("Cannot deduce file name form url. Use 'filename' parameter.") + + filename = filename or os.path.basename(url) download(url, filename) if md5: diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py index 455397546..17557672b 100644 --- a/conans/model/conan_file.py +++ b/conans/model/conan_file.py @@ -12,6 +12,8 @@ from conans.model.user_info import DepsUserInfo from conans.paths import RUN_LOG_NAME from conans.tools import environment_append, no_op from conans.client.output import Color +from conans.client.run_environment import RunEnvironment +from conans.client.tools.oss import os_info def create_options(conanfile): @@ -255,15 +257,23 @@ class ConanFile(object): """ define cpp_build_info, flags, etc """ - def run(self, command, output=True, cwd=None, win_bash=False, subsystem=None, msys_mingw=True): - if not win_bash: - retcode = self._runner(command, output, os.path.abspath(RUN_LOG_NAME), cwd) - else: + def run(self, command, output=True, cwd=None, win_bash=False, subsystem=None, msys_mingw=True, + ignore_errors=False, run_environment=False): + def _run(): + if not win_bash: + return 
self._runner(command, output, os.path.abspath(RUN_LOG_NAME), cwd) # FIXME: run in windows bash is not using output - retcode = tools.run_in_windows_bash(self, bashcmd=command, cwd=cwd, subsystem=subsystem, - msys_mingw=msys_mingw) + return tools.run_in_windows_bash(self, bashcmd=command, cwd=cwd, subsystem=subsystem, + msys_mingw=msys_mingw) + if run_environment: + with tools.environment_append(RunEnvironment(self).vars): + if os_info.is_macos: + command = 'DYLD_LIBRARY_PATH="%s" %s' % (os.environ.get('DYLD_LIBRARY_PATH', ''), command) + retcode = _run() + else: + retcode = _run() - if retcode != 0: + if not ignore_errors and retcode != 0: raise ConanException("Error %d while executing %s" % (retcode, command)) return retcode diff --git a/conans/search/search.py b/conans/search/search.py index 975a97403..2e189bb45 100644 --- a/conans/search/search.py +++ b/conans/search/search.py @@ -61,7 +61,7 @@ def evaluate(prop_name, prop_value, conan_vars_info): """ def compatible_prop(setting_value, prop_value): - return setting_value is None or prop_value == setting_value + return (prop_value == setting_value) or (prop_value == "None" and setting_value is None) info_settings = conan_vars_info.get("settings", []) info_options = conan_vars_info.get("options", []) diff --git a/conans/server/rest/controllers/file_upload_download_controller.py b/conans/server/rest/controllers/file_upload_download_controller.py index b49091a07..fe8ec36ab 100644 --- a/conans/server/rest/controllers/file_upload_download_controller.py +++ b/conans/server/rest/controllers/file_upload_download_controller.py @@ -20,7 +20,12 @@ class FileUploadDownloadController(Controller): token = request.query.get("signature", None) file_path = service.get_file_path(filepath, token) # https://github.com/kennethreitz/requests/issues/1586 - mimetype = "x-gzip" if filepath.endswith(".tgz") else "auto" + if filepath.endswith(".tgz"): + mimetype = "x-gzip" + elif filepath.endswith(".txz"): + mimetype = "x-xz" + else: + 
mimetype = "auto" return static_file(os.path.basename(file_path), root=os.path.dirname(file_path), mimetype=mimetype)
Using "?" in a tools.get() URL will fail, while using it in a tools.download() will succeed. To help us debug your issue please explain: - [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md). - [x] I've specified the Conan version, operating system version and any tool that can be relevant. - [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion. Example: ``` from conans import ConanFile, tools class LibraryConan(ConanFile): name = "Library" version = "1.0" license = "" url = "<Package recipe repository url here, for issues about the package>" description = "" settings = "os", "compiler", "build_type", "arch" def source(self): tools.get("http://example.com/?file=1") ``` ``` conan create . Company/beta ERROR: Library/1.0@Company/beta: Error in source() method, line 12 tools.get("http://example.com/?file=1") ConanConnectionError: Download failed, check server, possibly try again [Errno 22] Invalid argument: 'C:\\Users\\User\\.conan\\data\\Library\\1.0\\Company\\beta\\source\\?file=1' ```
conan-io/conan
diff --git a/conans/test/command/remove_test.py b/conans/test/command/remove_test.py index 093fddfb6..5cddca144 100644 --- a/conans/test/command/remove_test.py +++ b/conans/test/command/remove_test.py @@ -15,6 +15,35 @@ from conans.test.utils.test_files import temp_folder class RemoveOutdatedTest(unittest.TestCase): + + def remove_query_test(self): + test_server = TestServer(users={"lasote": "password"}) # exported users and passwords + servers = {"default": test_server} + client = TestClient(servers=servers, users={"default": [("lasote", "password")]}) + conanfile = """from conans import ConanFile +class Test(ConanFile): + settings = "os" + """ + client.save({"conanfile.py": conanfile}) + client.run("create . Test/0.1@lasote/testing -s os=Windows") + client.run("create . Test/0.1@lasote/testing -s os=Linux") + client.save({"conanfile.py": conanfile.replace("settings", "pass #")}) + client.run("create . Test2/0.1@lasote/testing") + client.run("upload * --all --confirm") + for remote in ("", "-r=default"): + client.run("remove Test/0.1@lasote/testing -q=os=Windows -f %s" % remote) + client.run("search Test/0.1@lasote/testing %s" % remote) + self.assertNotIn("os: Windows", client.out) + self.assertIn("os: Linux", client.out) + + client.run("remove Test2/0.1@lasote/testing -q=os=Windows -f %s" % remote) + client.run("search Test2/0.1@lasote/testing %s" % remote) + self.assertIn("Package_ID: 5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9", client.out) + client.run("remove Test2/0.1@lasote/testing -q=os=None -f %s" % remote) + client.run("search Test2/0.1@lasote/testing %s" % remote) + self.assertNotIn("Package_ID: 5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9", client.out) + self.assertIn("There are no packages", client.out) + def remove_outdated_test(self): test_server = TestServer(users={"lasote": "password"}) # exported users and passwords servers = {"default": test_server} diff --git a/conans/test/command/search_test.py b/conans/test/command/search_test.py index 
e94731ce9..457217c15 100644 --- a/conans/test/command/search_test.py +++ b/conans/test/command/search_test.py @@ -387,8 +387,7 @@ helloTest/1.4.10@fenix/stable""".format(remote) q = 'compiler="gcc" OR compiler.libcxx=libstdc++11' # Should find Visual because of the OR, visual doesn't care about libcxx - self._assert_pkg_q(q, ["LinuxPackageSHA", "PlatformIndependantSHA", - "WindowsPackageSHA"], remote) + self._assert_pkg_q(q, ["LinuxPackageSHA", "PlatformIndependantSHA"], remote) q = '(compiler="gcc" AND compiler.libcxx=libstdc++11) OR compiler.version=4.5' self._assert_pkg_q(q, ["LinuxPackageSHA"], remote) @@ -407,18 +406,25 @@ helloTest/1.4.10@fenix/stable""".format(remote) self._assert_pkg_q(q, ["PlatformIndependantSHA", "WindowsPackageSHA"], remote) q = '(os="Linux" OR os=Windows)' - self._assert_pkg_q(q, ["PlatformIndependantSHA", "LinuxPackageSHA", - "WindowsPackageSHA"], remote) + self._assert_pkg_q(q, ["LinuxPackageSHA", "WindowsPackageSHA"], remote) + + q = '(os="Linux" OR os=None)' + self._assert_pkg_q(q, ["LinuxPackageSHA", "PlatformIndependantSHA"], remote) + + q = '(os=None)' + self._assert_pkg_q(q, ["PlatformIndependantSHA"], remote) q = '(os="Linux" OR os=Windows) AND use_Qt=True' + self._assert_pkg_q(q, ["WindowsPackageSHA"], remote) + + q = '(os=None OR os=Windows) AND use_Qt=True' self._assert_pkg_q(q, ["PlatformIndependantSHA", "WindowsPackageSHA"], remote) q = '(os="Linux" OR os=Windows) AND use_Qt=True AND nonexistant_option=3' - self._assert_pkg_q(q, ["PlatformIndependantSHA", "WindowsPackageSHA"], remote) + self._assert_pkg_q(q, [], remote) q = '(os="Linux" OR os=Windows) AND use_Qt=True OR nonexistant_option=3' - self._assert_pkg_q(q, ["PlatformIndependantSHA", - "WindowsPackageSHA", "LinuxPackageSHA"], remote) + self._assert_pkg_q(q, ["WindowsPackageSHA", "LinuxPackageSHA"], remote) # test in local test_cases() @@ -474,9 +480,19 @@ helloTest/1.4.10@fenix/stable""".format(remote) self.client.run('search Hello/1.4.10/fenix/testing -q 
os=Windows') self.assertIn("WindowsPackageSHA", self.client.out) + self.assertNotIn("PlatformIndependantSHA", self.client.out) + self.assertNotIn("LinuxPackageSHA", self.client.out) + + self.client.run('search Hello/1.4.10/fenix/testing -q "os=Windows or os=None"') + self.assertIn("WindowsPackageSHA", self.client.out) self.assertIn("PlatformIndependantSHA", self.client.out) self.assertNotIn("LinuxPackageSHA", self.client.out) + self.client.run('search Hello/1.4.10/fenix/testing -q "os=Windows or os=Linux"') + self.assertIn("WindowsPackageSHA", self.client.out) + self.assertNotIn("PlatformIndependantSHA", self.client.out) + self.assertIn("LinuxPackageSHA", self.client.out) + self.client.run('search Hello/1.4.10/fenix/testing -q "os=Windows AND compiler.version=4.5"') self.assertIn("There are no packages for reference 'Hello/1.4.10@fenix/testing' " "matching the query 'os=Windows AND compiler.version=4.5'", self.client.out) diff --git a/conans/test/command/upload_test.py b/conans/test/command/upload_test.py index 661bf9d8e..193024241 100644 --- a/conans/test/command/upload_test.py +++ b/conans/test/command/upload_test.py @@ -2,9 +2,12 @@ import unittest from conans.tools import environment_append from conans.test.utils.tools import TestClient, TestServer from conans.test.utils.cpp_test_files import cpp_hello_conan_files -from conans.model.ref import ConanFileReference -from conans.util.files import save +from conans.model.ref import ConanFileReference, PackageReference +from conans.util.files import save, is_dirty, gzopen_without_timestamps import os +from mock import mock +from conans.errors import ConanException +from conans.paths import EXPORT_SOURCES_TGZ_NAME, PACKAGE_TGZ_NAME conanfile = """from conans import ConanFile @@ -75,6 +78,62 @@ class UploadTest(unittest.TestCase): self.assertIn("Uploading conan_package.tgz", client.user_io.out) self.assertIn("Uploading conanfile.py", client.user_io.out) + def broken_sources_tgz_test(self): + # 
https://github.com/conan-io/conan/issues/2854 + client = self._client() + client.save({"conanfile.py": conanfile, + "source.h": "my source"}) + client.run("create . user/testing") + ref = ConanFileReference.loads("Hello0/1.2.1@user/testing") + + def gzopen_patched(name, mode="r", fileobj=None, compresslevel=None, **kwargs): + raise ConanException("Error gzopen %s" % name) + with mock.patch('conans.client.remote_manager.gzopen_without_timestamps', new=gzopen_patched): + error = client.run("upload * --confirm", ignore_error=True) + self.assertTrue(error) + self.assertIn("ERROR: Error gzopen conan_sources.tgz", client.out) + + export_folder = client.client_cache.export(ref) + tgz = os.path.join(export_folder, EXPORT_SOURCES_TGZ_NAME) + self.assertTrue(os.path.exists(tgz)) + self.assertTrue(is_dirty(tgz)) + + client.run("upload * --confirm") + self.assertIn("WARN: Hello0/1.2.1@user/testing: Removing conan_sources.tgz, marked as dirty", + client.out) + self.assertTrue(os.path.exists(tgz)) + self.assertFalse(is_dirty(tgz)) + + def broken_package_tgz_test(self): + # https://github.com/conan-io/conan/issues/2854 + client = self._client() + client.save({"conanfile.py": conanfile, + "source.h": "my source"}) + client.run("create . 
user/testing") + package_ref = PackageReference.loads("Hello0/1.2.1@user/testing:" + "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9") + + def gzopen_patched(name, mode="r", fileobj=None, compresslevel=None, **kwargs): + if name == PACKAGE_TGZ_NAME: + raise ConanException("Error gzopen %s" % name) + return gzopen_without_timestamps(name, mode, fileobj, compresslevel, **kwargs) + with mock.patch('conans.client.remote_manager.gzopen_without_timestamps', new=gzopen_patched): + error = client.run("upload * --confirm --all", ignore_error=True) + self.assertTrue(error) + self.assertIn("ERROR: Error gzopen conan_package.tgz", client.out) + + export_folder = client.client_cache.package(package_ref) + tgz = os.path.join(export_folder, PACKAGE_TGZ_NAME) + self.assertTrue(os.path.exists(tgz)) + self.assertTrue(is_dirty(tgz)) + + client.run("upload * --confirm --all") + self.assertIn("WARN: Hello0/1.2.1@user/testing:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9: " + "Removing conan_package.tgz, marked as dirty", + client.out) + self.assertTrue(os.path.exists(tgz)) + self.assertFalse(is_dirty(tgz)) + def corrupt_upload_test(self): client = self._client() diff --git a/conans/test/functional/runner_test.py b/conans/test/functional/runner_test.py index f0dc4fda8..d00f9eb71 100644 --- a/conans/test/functional/runner_test.py +++ b/conans/test/functional/runner_test.py @@ -2,7 +2,6 @@ import os import six import unittest -from io import StringIO from conans.client.runner import ConanRunner from conans.test.utils.tools import TestClient @@ -19,6 +18,18 @@ class RunnerTest(unittest.TestCase): client.run("build .") return client + def ignore_error_test(self): + conanfile = """from conans import ConanFile +class Pkg(ConanFile): + def source(self): + ret = self.run("not_a_command", ignore_errors=True) + self.output.info("RETCODE %s" % (ret!=0)) +""" + client = TestClient() + client.save({"conanfile.py": conanfile}) + client.run("source .") + self.assertIn("RETCODE True", client.out) + def 
basic_test(self): conanfile = ''' from conans import ConanFile diff --git a/conans/test/generators/visual_studio_test.py b/conans/test/generators/visual_studio_test.py index 0f43200ec..fce6ead8c 100644 --- a/conans/test/generators/visual_studio_test.py +++ b/conans/test/generators/visual_studio_test.py @@ -7,6 +7,10 @@ from conans.model.settings import Settings from conans.model.conan_file import ConanFile from conans.model.build_info import CppInfo from conans.model.ref import ConanFileReference +from conans.test.utils.test_files import temp_folder +from conans.util.files import save +import os +from conans import tools class VisualStudioGeneratorTest(unittest.TestCase): @@ -27,3 +31,32 @@ class VisualStudioGeneratorTest(unittest.TestCase): self.assertIn('<PropertyGroup Label="Conan-RootDirs">', content) self.assertIn("<Conan-MyPkg-Root>dummy_root_folder1</Conan-MyPkg-Root>", content) self.assertIn("<Conan-My-Fancy-Pkg_2-Root>dummy_root_folder2</Conan-My-Fancy-Pkg_2-Root>", content) + + def user_profile_test(self): + conanfile = ConanFile(None, None, Settings({}), None) + ref = ConanFileReference.loads("MyPkg/0.1@user/testing") + tmp_folder = temp_folder() + pkg1 = os.path.join(tmp_folder, "pkg1") + cpp_info = CppInfo(pkg1) + cpp_info.includedirs = ["include"] + save(os.path.join(pkg1, "include/file.h"), "") + conanfile.deps_cpp_info.update(cpp_info, ref.name) + ref = ConanFileReference.loads("My.Fancy-Pkg_2/0.1@user/testing") + pkg2 = os.path.join(tmp_folder, "pkg2") + cpp_info = CppInfo(pkg2) + cpp_info.includedirs = ["include"] + save(os.path.join(pkg2, "include/file.h"), "") + conanfile.deps_cpp_info.update(cpp_info, ref.name) + generator = VisualStudioGenerator(conanfile) + + with tools.environment_append({"USERPROFILE": tmp_folder}): + content = generator.content + xml.etree.ElementTree.fromstring(content) + self.assertIn("<AdditionalIncludeDirectories>$(USERPROFILE)/pkg1/include;" + "$(USERPROFILE)/pkg2/include;", content) + + with 
tools.environment_append({"USERPROFILE": tmp_folder.upper()}): + content = generator.content + xml.etree.ElementTree.fromstring(content) + self.assertIn("<AdditionalIncludeDirectories>$(USERPROFILE)/pkg1/include;" + "$(USERPROFILE)/pkg2/include;", content) diff --git a/conans/test/integration/run_envronment_test.py b/conans/test/integration/run_envronment_test.py index 48a3638b9..efd78257b 100644 --- a/conans/test/integration/run_envronment_test.py +++ b/conans/test/integration/run_envronment_test.py @@ -34,3 +34,68 @@ class HelloConan(ConanFile): client.save({"conanfile.py": reuse}, clean_first=True) client.run("install . --build missing") client.run("build .") + self.assertIn("Hello Hello0", client.out) + + def test_shared_run_environment(self): + client = TestClient() + cmake = """set(CMAKE_CXX_COMPILER_WORKS 1) +set(CMAKE_CXX_ABI_COMPILED 1) +project(MyHello CXX) +cmake_minimum_required(VERSION 2.8.12) + +add_library(hello SHARED hello.cpp) +add_executable(say_hello main.cpp) +target_link_libraries(say_hello hello)""" + hello_h = """#ifdef WIN32 + #define HELLO_EXPORT __declspec(dllexport) +#else + #define HELLO_EXPORT +#endif + +HELLO_EXPORT void hello(); +""" + hello_cpp = r"""#include "hello.h" +#include <iostream> +void hello(){ + std::cout<<"Hello Tool!\n"; +} +""" + main = """#include "hello.h" + int main(){ + hello(); + } + """ + conanfile = """from conans import ConanFile, CMake +class Pkg(ConanFile): + exports_sources = "*" + def build(self): + cmake = CMake(self) + cmake.configure() + cmake.build() + + def package(self): + self.copy("*say_hello.exe", dst="bin", keep_path=False) + self.copy("*say_hello", dst="bin", keep_path=False) + self.copy(pattern="*.dll", dst="bin", keep_path=False) + self.copy(pattern="*.dylib", dst="lib", keep_path=False) + self.copy(pattern="*.so", dst="lib", keep_path=False) +""" + client.save({"conanfile.py": conanfile, + "CMakeLists.txt": cmake, + "main.cpp": main, + "hello.cpp": hello_cpp, + "hello.h": hello_h}) + 
client.run("create . Pkg/0.1@user/testing") + + reuse = '''from conans import ConanFile +class HelloConan(ConanFile): + requires = "Pkg/0.1@user/testing" + + def build(self): + self.run("say_hello", run_environment=True) +''' + + client.save({"conanfile.py": reuse}, clean_first=True) + client.run("install .") + client.run("build .") + self.assertIn("Hello Tool!", client.out) diff --git a/conans/test/util/tools_test.py b/conans/test/util/tools_test.py index 557095f54..6927347c6 100644 --- a/conans/test/util/tools_test.py +++ b/conans/test/util/tools_test.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- - +from bottle import static_file, request import mock import os import platform @@ -25,7 +25,8 @@ from conans.model.settings import Settings from conans.test.utils.runner import TestRunner from conans.test.utils.test_files import temp_folder -from conans.test.utils.tools import TestClient, TestBufferConanOutput, create_local_git_repo +from conans.test.utils.tools import TestClient, TestBufferConanOutput, create_local_git_repo, \ + StoppableThreadBottle from conans.tools import which from conans.tools import OSInfo, SystemPackageTool, replace_in_file, AptTool, ChocolateyTool,\ @@ -33,6 +34,8 @@ from conans.tools import OSInfo, SystemPackageTool, replace_in_file, AptTool, Ch from conans.util.files import save, load, md5 import requests +from nose.plugins.attrib import attr + class SystemPackageToolTest(unittest.TestCase): def setUp(self): @@ -993,6 +996,51 @@ ProgramFiles(x86)=C:\Program Files (x86) else: self.assertEqual(str, type(result)) + @attr('slow') + def get_filename_download_test(self): + # Create a tar file to be downloaded from server + with tools.chdir(tools.mkdir_tmp()): + import tarfile + tar_file = tarfile.open("sample.tar.gz", "w:gz") + tools.mkdir("test_folder") + tar_file.add(os.path.abspath("test_folder"), "test_folder") + tar_file.close() + file_path = os.path.abspath("sample.tar.gz") + assert(os.path.exists(file_path)) + + # Instance stoppable thread 
server and add endpoints + thread = StoppableThreadBottle() + + @thread.server.get("/this_is_not_the_file_name") + def get_file(): + return static_file(os.path.basename(file_path), root=os.path.dirname(file_path)) + + @thread.server.get("/") + def get_file2(): + self.assertEquals(request.query["file"], "1") + return static_file(os.path.basename(file_path), root=os.path.dirname(file_path)) + + thread.run_server() + + # Test: File name cannot be deduced from '?file=1' + with self.assertRaisesRegexp(ConanException, + "Cannot deduce file name form url. Use 'filename' parameter."): + tools.get("http://localhost:8266/?file=1") + + # Test: Works with filename parameter instead of '?file=1' + with tools.chdir(tools.mkdir_tmp()): + tools.get("http://localhost:8266/?file=1", filename="sample.tar.gz") + self.assertTrue(os.path.exists("test_folder")) + + # Test: Use a different endpoint but still not the filename one + with tools.chdir(tools.mkdir_tmp()): + from zipfile import BadZipfile + with self.assertRaises(BadZipfile): + tools.get("http://localhost:8266/this_is_not_the_file_name") + tools.get("http://localhost:8266/this_is_not_the_file_name", filename="sample.tar.gz") + self.assertTrue(os.path.exists("test_folder")) + thread.stop() + class GitToolTest(unittest.TestCase): @@ -1059,10 +1107,10 @@ class GitToolTest(unittest.TestCase): def _create_paths(): tmp = temp_folder() submodule_path = os.path.join( - tmp, + tmp, os.path.basename(os.path.normpath(submodule))) subsubmodule_path = os.path.join( - submodule_path, + submodule_path, os.path.basename(os.path.normpath(subsubmodule))) return tmp, submodule_path, subsubmodule_path @@ -1079,7 +1127,7 @@ class GitToolTest(unittest.TestCase): with self.assertRaisesRegexp(ConanException, "Invalid 'submodule' attribute value in the 'scm'."): git.clone(path, submodule="invalid") - # Check shallow + # Check shallow tmp, submodule_path, subsubmodule_path = _create_paths() git = Git(tmp) git.clone(path, submodule="shallow") diff --git 
a/conans/test/util/xz_test.py b/conans/test/util/xz_test.py new file mode 100644 index 000000000..262549736 --- /dev/null +++ b/conans/test/util/xz_test.py @@ -0,0 +1,88 @@ +import os +from unittest import TestCase +import six +import unittest +import tarfile + +from conans.test.utils.test_files import temp_folder +from conans.tools import unzip, save +from conans.util.files import load, save_files +from conans.errors import ConanException +from conans.test.utils.tools import TestClient, TestServer +from conans.model.ref import ConanFileReference, PackageReference + + +class XZTest(TestCase): + def test_error_xz(self): + server = TestServer() + ref = ConanFileReference.loads("Pkg/0.1@user/channel") + export = server.paths.export(ref) + save_files(export, {"conanfile.py": "#", + "conanmanifest.txt": "#", + "conan_export.txz": "#"}) + client = TestClient(servers={"default": server}, + users={"default": [("lasote", "mypass")]}) + error = client.run("install Pkg/0.1@user/channel", ignore_error=True) + self.assertTrue(error) + self.assertIn("ERROR: This Conan version is not prepared to handle " + "'conan_export.txz' file format", client.out) + + def test_error_sources_xz(self): + server = TestServer() + ref = ConanFileReference.loads("Pkg/0.1@user/channel") + client = TestClient(servers={"default": server}, + users={"default": [("lasote", "mypass")]}) + export = server.paths.export(ref) + conanfile = """from conans import ConanFile +class Pkg(ConanFile): + exports_sources = "*" +""" + save_files(export, {"conanfile.py": conanfile, + "conanmanifest.txt": "1", + "conan_sources.txz": "#"}) + error = client.run("install Pkg/0.1@user/channel --build", ignore_error=True) + self.assertTrue(error) + self.assertIn("ERROR: This Conan version is not prepared to handle " + "'conan_sources.txz' file format", client.out) + + def test_error_package_xz(self): + server = TestServer() + ref = ConanFileReference.loads("Pkg/0.1@user/channel") + client = TestClient(servers={"default": 
server}, + users={"default": [("lasote", "mypass")]}) + export = server.paths.export(ref) + conanfile = """from conans import ConanFile +class Pkg(ConanFile): + exports_sources = "*" +""" + save_files(export, {"conanfile.py": conanfile, + "conanmanifest.txt": "1"}) + pkg_ref = PackageReference(ref, "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9") + package = server.paths.package(pkg_ref) + save_files(package, {"conaninfo.txt": "#", + "conanmanifest.txt": "1", + "conan_package.txz": "#"}) + error = client.run("install Pkg/0.1@user/channel", ignore_error=True) + self.assertTrue(error) + self.assertIn("ERROR: This Conan version is not prepared to handle " + "'conan_package.txz' file format", client.out) + + @unittest.skipUnless(six.PY3, "only Py3") + def test(self): + tmp_dir = temp_folder() + file_path = os.path.join(tmp_dir, "a_file.txt") + save(file_path, "my content!") + txz = os.path.join(tmp_dir, "sample.tar.xz") + with tarfile.open(txz, "w:xz") as tar: + tar.add(file_path, "a_file.txt") + + dest_folder = temp_folder() + unzip(txz, dest_folder) + content = load(os.path.join(dest_folder, "a_file.txt")) + self.assertEqual(content, "my content!") + + @unittest.skipUnless(six.PY2, "only Py2") + def test_error_python2(self): + with self.assertRaisesRegexp(ConanException, "XZ format not supported in Python 2"): + dest_folder = temp_folder() + unzip("somefile.tar.xz", dest_folder) diff --git a/conans/test/utils/tools.py b/conans/test/utils/tools.py index 824cbb68e..ee5ff714b 100644 --- a/conans/test/utils/tools.py +++ b/conans/test/utils/tools.py @@ -2,13 +2,16 @@ import os import shlex import shutil import sys +import threading import uuid from collections import Counter from contextlib import contextmanager from io import StringIO +import bottle import requests import six +import time from mock import Mock from six.moves.urllib.parse import urlsplit, urlunsplit from webtest.app import TestApp @@ -507,3 +510,24 @@ class TestClient(object): save_files(path, files) if not 
files: mkdir(self.current_folder) + + +class StoppableThreadBottle(threading.Thread): + """ + Real server to test download endpoints + """ + server = None + + def __init__(self, host="127.0.0.1", port=8266): + self.server = bottle.Bottle() + super(StoppableThreadBottle, self).__init__(target=self.server.run, kwargs={"host": host, + "port": port}) + self.daemon = True + self._stop = threading.Event() + + def stop(self): + self._stop.set() + + def run_server(self): + self.start() + time.sleep(1)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 9 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "nose-cov", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "conans/requirements.txt", "conans/requirements_server.txt", "conans/requirements_dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==1.6.6 attrs==22.2.0 beautifulsoup4==4.12.3 bottle==0.12.25 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 colorama==0.3.9 -e git+https://github.com/conan-io/conan.git@c3baafb780b6e5498f8bd460426901d9d5ab10e1#egg=conan cov-core==1.15.0 coverage==4.2 deprecation==2.0.7 distro==1.1.0 fasteners==0.19 future==0.16.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 mock==1.3.0 node-semver==0.2.0 nose==1.3.7 nose-cov==1.6 packaging==21.3 parameterized==0.8.1 patch==1.16 pbr==6.1.1 pluggy==1.0.0 pluginbase==0.7 py==1.11.0 Pygments==2.14.0 PyJWT==1.7.1 pylint==1.8.4 pyparsing==3.1.4 pytest==7.0.1 PyYAML==3.12 requests==2.27.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 waitress==2.0.0 WebOb==1.8.9 WebTest==2.0.35 wrapt==1.16.0 zipp==3.6.0
name: conan channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==1.6.6 - attrs==22.2.0 - beautifulsoup4==4.12.3 - bottle==0.12.25 - charset-normalizer==2.0.12 - codecov==2.1.13 - colorama==0.3.9 - cov-core==1.15.0 - coverage==4.2 - deprecation==2.0.7 - distro==1.1.0 - fasteners==0.19 - future==0.16.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - mock==1.3.0 - node-semver==0.2.0 - nose==1.3.7 - nose-cov==1.6 - packaging==21.3 - parameterized==0.8.1 - patch==1.16 - pbr==6.1.1 - pluggy==1.0.0 - pluginbase==0.7 - py==1.11.0 - pygments==2.14.0 - pyjwt==1.7.1 - pylint==1.8.4 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==3.12 - requests==2.27.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - waitress==2.0.0 - webob==1.8.9 - webtest==2.0.35 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/conan
[ "conans/test/util/xz_test.py::XZTest::test" ]
[ "conans/test/integration/run_envronment_test.py::RunEnvironmentTest::test_run_environment", "conans/test/integration/run_envronment_test.py::RunEnvironmentTest::test_shared_run_environment", "conans/test/util/tools_test.py::ToolsTest::test_get_env_in_conanfile", "conans/test/util/tools_test.py::ToolsTest::test_global_tools_overrided", "conans/test/util/tools_test.py::GitToolTest::test_clone_submodule_git", "conans/test/util/xz_test.py::XZTest::test_error_package_xz", "conans/test/util/xz_test.py::XZTest::test_error_sources_xz", "conans/test/util/xz_test.py::XZTest::test_error_xz" ]
[ "conans/test/functional/runner_test.py::RunnerTest::test_write_to_stringio", "conans/test/util/tools_test.py::ReplaceInFileTest::test_replace_in_file", "conans/test/util/tools_test.py::ToolsTest::test_environment_nested", "conans/test/util/tools_test.py::GitToolTest::test_clone_existing_folder_git", "conans/test/util/tools_test.py::GitToolTest::test_clone_existing_folder_without_branch", "conans/test/util/tools_test.py::GitToolTest::test_clone_git", "conans/test/util/tools_test.py::GitToolTest::test_credentials", "conans/test/util/tools_test.py::GitToolTest::test_verify_ssl" ]
[]
MIT License
2,757
[ "conans/client/remote_manager.py", "conans/client/runner.py", "conans/client/tools/files.py", "conans/server/rest/controllers/file_upload_download_controller.py", ".ci/jenkins/conf.py", "conans/search/search.py", "conans/client/generators/visualstudio.py", "conans/client/tools/net.py", "conans/model/conan_file.py" ]
[ "conans/client/remote_manager.py", "conans/client/runner.py", "conans/client/tools/files.py", "conans/server/rest/controllers/file_upload_download_controller.py", ".ci/jenkins/conf.py", "conans/search/search.py", "conans/client/generators/visualstudio.py", "conans/client/tools/net.py", "conans/model/conan_file.py" ]
mesonbuild__meson-3863
09ad29ec560f2a05108694d909de30b5e3e58357
2018-07-10 11:37:25
4ff7b65f9be936ef406aa839958c1db991ba8272
nirbheek: Another thing that would be a Very Nice Thing that ensures that stuff like this doesn't break the Intel compiler in the future is to find a way to add the latest version of ICC to the Docker instance that we use for running [Linux tests on the CI](https://github.com/mesonbuild/meson/blob/master/.travis.yml). Then we can run the tests against it in the same way that we run clang tests. asartori86: > find a way to add the latest version of ICC to the Docker instance don't you have to pay to have the intel compiler? nirbheek: > don't you have to pay for intel compiler? I believe you can get a free version for open source use: https://software.intel.com/en-us/qualify-for-free-software/opensourcecontributor I remember getting it for FOSS use 1-2 years ago when I was implementing it for Meson, but I don't remember how.
diff --git a/mesonbuild/compilers/c.py b/mesonbuild/compilers/c.py index b62155b7b..af3e2c4a1 100644 --- a/mesonbuild/compilers/c.py +++ b/mesonbuild/compilers/c.py @@ -866,12 +866,29 @@ class CCompiler(Compiler): return patterns @staticmethod - def _get_trials_from_pattern(pattern, directory, libname): + def _sort_shlibs_openbsd(libs): + filtered = [] + for lib in libs: + # Validate file as a shared library of type libfoo.so.X.Y + ret = lib.rsplit('.so.', maxsplit=1) + if len(ret) != 2: + continue + try: + float(ret[1]) + except ValueError: + continue + filtered.append(lib) + float_cmp = lambda x: float(x.rsplit('.so.', maxsplit=1)[1]) + return sorted(filtered, key=float_cmp, reverse=True) + + @classmethod + def _get_trials_from_pattern(cls, pattern, directory, libname): f = os.path.join(directory, pattern.format(libname)) + # Globbing for OpenBSD if '*' in pattern: # NOTE: globbing matches directories and broken symlinks # so we have to do an isfile test on it later - return glob.glob(f) + return cls._sort_shlibs_openbsd(glob.glob(f)) return [f] @staticmethod diff --git a/mesonbuild/compilers/compilers.py b/mesonbuild/compilers/compilers.py index 21aab1163..25835a32d 100644 --- a/mesonbuild/compilers/compilers.py +++ b/mesonbuild/compilers/compilers.py @@ -1561,6 +1561,9 @@ class IntelCompiler: else: return ['-openmp'] + def get_link_whole_for(self, args): + return GnuCompiler.get_link_whole_for(self, args) + class ArmCompiler: # Functionality that is common to all ARM family compilers.
intel compiler doesn't implement link_whole Hi, I got an error when including PETSc as external dependency. Here is my `meson.build` ``` project('xxx', 'cpp', default_options : ['cpp_std=c++11']) pdep = dependency('PETSc') ``` this is what I get when I run meson ```The Meson build system Version: 0.47.0 Source dir: /some/path/ Build dir: /some/path/build Build type: native build Project name: xxx Project version: undefined Native C++ compiler: icpc (intel 17.0.4 "icpc (ICC) 17.0.4 20170411") Build machine cpu family: x86_64 Build machine cpu: x86_64 Found pkg-config: /usr/bin/pkg-config (0.27.1) meson.build:5:0: ERROR: Language C++ does not support linking whole archives. A full log can be found at /some/path/build/meson-logs/meson-log.txt ``` and the log is ``` Build started at 2018-07-10T11:48:00.454242 Main binary: /bin/python3 Python system: Linux The Meson build system Version: 0.47.0 Source dir: /some/path Build dir: /some/path/build Build type: native build Project name: xxx Project version: undefined Sanity testing C++ compiler: icpc Is cross compiler: False. 
Sanity check compiler command line: icpc /some/path/build/meson-private/sanitycheckcpp.cc -o /some/path/build/meson-private/sanitycheckcpp.exe Sanity check compile stdout: ----- Sanity check compile stderr: ----- Running test binary command: /some/path/build/meson-private/sanitycheckcpp.exe Native C++ compiler: icpc (intel 17.0.4 "icpc (ICC) 17.0.4 20170411") Build machine cpu family: x86_64 Build machine cpu: x86_64 Found pkg-config: /usr/bin/pkg-config (0.27.1) Determining dependency 'PETSc' with pkg-config executable '/usr/bin/pkg-config' Called `/usr/bin/pkg-config --modversion PETSc` -> 0 3.9.2 Called `/usr/bin/pkg-config --cflags PETSc` -> 0 -I/...../petsc-3.9.2/arch-linux-c-dbg-g-real-avx-2-512-int64/include Called `/usr/bin/pkg-config PETSc --libs` -> 0 -L/...../petsc-3.9.2/arch-linux-c-dbg-g-real-avx-2-512-int64/lib -lpetsc Called `/usr/bin/pkg-config PETSc --libs` -> 0 -L/......./petsc-3.9.2/arch-linux-c-dbg-g-real-avx-2-512-int64/lib -lpetsc meson.build:5:0: ERROR: Language C++ does not support linking whole archives. ``` however, if I issue from the command line ``` icpc $(pkg-config --cflags --libs PETSc) meson-private/sanitycheckcpp.cc ``` it compiles and links.. What happens inside the call to `dependency`? Why I get that error and how can I solve it? thanks
mesonbuild/meson
diff --git a/run_unittests.py b/run_unittests.py index df4603e3a..f4e95a302 100755 --- a/run_unittests.py +++ b/run_unittests.py @@ -522,12 +522,28 @@ class InternalTests(unittest.TestCase): self.assertEqual(p, shr) p = cc.get_library_naming(env, 'static') self.assertEqual(p, stc) - p = cc.get_library_naming(env, 'default') - self.assertEqual(p, shr + stc) - p = cc.get_library_naming(env, 'shared-static') - self.assertEqual(p, shr + stc) p = cc.get_library_naming(env, 'static-shared') self.assertEqual(p, stc + shr) + p = cc.get_library_naming(env, 'shared-static') + self.assertEqual(p, shr + stc) + p = cc.get_library_naming(env, 'default') + self.assertEqual(p, shr + stc) + # Test find library by mocking up openbsd + if platform != 'openbsd': + return + with tempfile.TemporaryDirectory() as tmpdir: + with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f: + f.write('') + with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f: + f.write('') + with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f: + f.write('') + with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f: + f.write('') + with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f: + f.write('') + found = cc.find_library_real('foo', env, [tmpdir], '', 'default') + self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0') def test_find_library_patterns(self): '''
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 2 }
0.47
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 -e git+https://github.com/mesonbuild/meson.git@09ad29ec560f2a05108694d909de30b5e3e58357#egg=meson packaging==24.2 pluggy==1.5.0 pytest==8.3.5 tomli==2.2.1
name: meson channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/meson
[ "run_unittests.py::InternalTests::test_find_library_patterns" ]
[ "run_unittests.py::AllPlatformTests::test_absolute_prefix_libdir", "run_unittests.py::AllPlatformTests::test_all_forbidden_targets_tested", "run_unittests.py::AllPlatformTests::test_always_prefer_c_compiler_for_asm", "run_unittests.py::AllPlatformTests::test_array_option_bad_change", "run_unittests.py::AllPlatformTests::test_array_option_change", "run_unittests.py::AllPlatformTests::test_array_option_empty_equivalents", "run_unittests.py::AllPlatformTests::test_build_by_default", "run_unittests.py::AllPlatformTests::test_check_module_linking", "run_unittests.py::AllPlatformTests::test_command_line", "run_unittests.py::AllPlatformTests::test_compiler_detection", "run_unittests.py::AllPlatformTests::test_compiler_options_documented", "run_unittests.py::AllPlatformTests::test_compiler_run_command", "run_unittests.py::AllPlatformTests::test_configure_file_warnings", "run_unittests.py::AllPlatformTests::test_conflicting_d_dash_option", "run_unittests.py::AllPlatformTests::test_cpu_families_documented", "run_unittests.py::AllPlatformTests::test_cross_file_system_paths", "run_unittests.py::AllPlatformTests::test_custom_target_changes_cause_rebuild", "run_unittests.py::AllPlatformTests::test_custom_target_exe_data_deterministic", "run_unittests.py::AllPlatformTests::test_dash_d_dedup", "run_unittests.py::AllPlatformTests::test_default_options_prefix", "run_unittests.py::AllPlatformTests::test_default_options_prefix_dependent_defaults", "run_unittests.py::AllPlatformTests::test_dirs", "run_unittests.py::AllPlatformTests::test_dist_git", "run_unittests.py::AllPlatformTests::test_dist_hg", "run_unittests.py::AllPlatformTests::test_feature_check_usage_subprojects", "run_unittests.py::AllPlatformTests::test_flock", "run_unittests.py::AllPlatformTests::test_forcefallback", "run_unittests.py::AllPlatformTests::test_free_stringarray_setting", "run_unittests.py::AllPlatformTests::test_guessed_linker_dependencies", 
"run_unittests.py::AllPlatformTests::test_identical_target_name_in_subdir_flat_layout", "run_unittests.py::AllPlatformTests::test_identical_target_name_in_subproject_flat_layout", "run_unittests.py::AllPlatformTests::test_install_introspection", "run_unittests.py::AllPlatformTests::test_internal_include_order", "run_unittests.py::AllPlatformTests::test_libdir_must_be_inside_prefix", "run_unittests.py::AllPlatformTests::test_markdown_files_in_sitemap", "run_unittests.py::AllPlatformTests::test_ndebug_if_release_disabled", "run_unittests.py::AllPlatformTests::test_ndebug_if_release_enabled", "run_unittests.py::AllPlatformTests::test_permitted_method_kwargs", "run_unittests.py::AllPlatformTests::test_pkgconfig_gen_escaping", "run_unittests.py::AllPlatformTests::test_pkgconfig_static", "run_unittests.py::AllPlatformTests::test_prebuilt_object", "run_unittests.py::AllPlatformTests::test_prebuilt_shared_lib", "run_unittests.py::AllPlatformTests::test_prebuilt_static_lib", "run_unittests.py::AllPlatformTests::test_prefix_dependent_defaults", "run_unittests.py::AllPlatformTests::test_preprocessor_checks_CPPFLAGS", "run_unittests.py::AllPlatformTests::test_rpath_uses_ORIGIN", "run_unittests.py::AllPlatformTests::test_run_target_files_path", "run_unittests.py::AllPlatformTests::test_same_d_option_twice", "run_unittests.py::AllPlatformTests::test_same_d_option_twice_configure", "run_unittests.py::AllPlatformTests::test_same_dash_option_twice", "run_unittests.py::AllPlatformTests::test_same_dash_option_twice_configure", "run_unittests.py::AllPlatformTests::test_same_project_d_option_twice", "run_unittests.py::AllPlatformTests::test_same_project_d_option_twice_configure", "run_unittests.py::AllPlatformTests::test_source_changes_cause_rebuild", "run_unittests.py::AllPlatformTests::test_static_compile_order", "run_unittests.py::AllPlatformTests::test_static_library_lto", "run_unittests.py::AllPlatformTests::test_static_library_overwrite", 
"run_unittests.py::AllPlatformTests::test_subproject_promotion", "run_unittests.py::AllPlatformTests::test_suite_selection", "run_unittests.py::AllPlatformTests::test_templates", "run_unittests.py::AllPlatformTests::test_testsetup_selection", "run_unittests.py::AllPlatformTests::test_testsetups", "run_unittests.py::AllPlatformTests::test_uninstall", "run_unittests.py::AllPlatformTests::test_warning_location", "run_unittests.py::FailureTests::test_apple_frameworks_dependency", "run_unittests.py::FailureTests::test_boost_BOOST_ROOT_dependency", "run_unittests.py::FailureTests::test_boost_notfound_dependency", "run_unittests.py::FailureTests::test_dependency", "run_unittests.py::FailureTests::test_dependency_invalid_method", "run_unittests.py::FailureTests::test_dict_forbids_duplicate_keys", "run_unittests.py::FailureTests::test_dict_forbids_integer_key", "run_unittests.py::FailureTests::test_dict_requires_key_value_pairs", "run_unittests.py::FailureTests::test_exception_exit_status", "run_unittests.py::FailureTests::test_gnustep_notfound_dependency", "run_unittests.py::FailureTests::test_llvm_dependency", "run_unittests.py::FailureTests::test_objc_cpp_detection", "run_unittests.py::FailureTests::test_sdl2_notfound_dependency", "run_unittests.py::FailureTests::test_subproject_variables", "run_unittests.py::FailureTests::test_using_recent_feature", "run_unittests.py::FailureTests::test_using_too_recent_feature", "run_unittests.py::FailureTests::test_using_too_recent_feature_dependency", "run_unittests.py::FailureTests::test_wx_dependency", "run_unittests.py::FailureTests::test_wx_notfound_dependency", "run_unittests.py::WindowsTests::test_find_program", "run_unittests.py::WindowsTests::test_ignore_libs", "run_unittests.py::WindowsTests::test_rc_depends_files", "run_unittests.py::LinuxlikeTests::test_apple_bitcode", "run_unittests.py::LinuxlikeTests::test_apple_bitcode_modules", "run_unittests.py::LinuxlikeTests::test_basic_soname", 
"run_unittests.py::LinuxlikeTests::test_build_rpath", "run_unittests.py::LinuxlikeTests::test_compiler_c_stds", "run_unittests.py::LinuxlikeTests::test_compiler_check_flags_order", "run_unittests.py::LinuxlikeTests::test_compiler_cpp_stds", "run_unittests.py::LinuxlikeTests::test_coverage", "run_unittests.py::LinuxlikeTests::test_cpp_std_override", "run_unittests.py::LinuxlikeTests::test_cross_find_program", "run_unittests.py::LinuxlikeTests::test_custom_soname", "run_unittests.py::LinuxlikeTests::test_install_umask", "run_unittests.py::LinuxlikeTests::test_installed_modes", "run_unittests.py::LinuxlikeTests::test_installed_modes_extended", "run_unittests.py::LinuxlikeTests::test_installed_soname", "run_unittests.py::LinuxlikeTests::test_introspect_dependencies", "run_unittests.py::LinuxlikeTests::test_old_gnome_module_codepaths", "run_unittests.py::LinuxlikeTests::test_order_of_l_arguments", "run_unittests.py::LinuxlikeTests::test_pch_with_address_sanitizer", "run_unittests.py::LinuxlikeTests::test_pic", "run_unittests.py::LinuxlikeTests::test_pkg_unfound", "run_unittests.py::LinuxlikeTests::test_pkgconfig_formatting", "run_unittests.py::LinuxlikeTests::test_pkgconfig_gen", "run_unittests.py::LinuxlikeTests::test_pkgconfig_gen_deps", "run_unittests.py::LinuxlikeTests::test_pkgconfig_internal_libraries", "run_unittests.py::LinuxlikeTests::test_pkgconfig_usage", "run_unittests.py::LinuxlikeTests::test_qt5dependency_pkgconfig_detection", "run_unittests.py::LinuxlikeTests::test_qt5dependency_qmake_detection", "run_unittests.py::LinuxlikeTests::test_reconfigure", "run_unittests.py::LinuxlikeTests::test_run_installed", "run_unittests.py::LinuxlikeTests::test_soname", "run_unittests.py::LinuxlikeTests::test_unity_subproj", "run_unittests.py::LinuxlikeTests::test_usage_external_library", "run_unittests.py::LinuxlikeTests::test_vala_c_warnings", "run_unittests.py::LinuxlikeTests::test_vala_generated_source_buildir_inside_source_tree", 
"run_unittests.py::LinuxArmCrossCompileTests::test_cflags_cross_environment_pollution", "run_unittests.py::LinuxArmCrossCompileTests::test_cross_file_overrides_always_args", "run_unittests.py::PythonTests::test_versions", "run_unittests.py::RewriterTests::test_basic", "run_unittests.py::RewriterTests::test_subdir" ]
[ "run_unittests.py::InternalTests::test_compiler_args_class", "run_unittests.py::InternalTests::test_extract_as_list", "run_unittests.py::InternalTests::test_listify", "run_unittests.py::InternalTests::test_mode_symbolic_to_bits", "run_unittests.py::InternalTests::test_needs_exe_wrapper_override", "run_unittests.py::InternalTests::test_pkgconfig_module", "run_unittests.py::InternalTests::test_snippets", "run_unittests.py::InternalTests::test_string_templates_substitution", "run_unittests.py::InternalTests::test_version_number" ]
[]
Apache License 2.0
2,758
[ "mesonbuild/compilers/compilers.py", "mesonbuild/compilers/c.py" ]
[ "mesonbuild/compilers/compilers.py", "mesonbuild/compilers/c.py" ]
dgasmith__opt_einsum-30
45eb0220dc00186c0dfd0de27e6dd725a8d6e252
2018-07-10 19:07:04
45eb0220dc00186c0dfd0de27e6dd725a8d6e252
codecov-io: # [Codecov](https://codecov.io/gh/dgasmith/opt_einsum/pull/30?src=pr&el=h1) Report > Merging [#30](https://codecov.io/gh/dgasmith/opt_einsum/pull/30?src=pr&el=desc) into [master](https://codecov.io/gh/dgasmith/opt_einsum/commit/960cf7c45993077be6d4ad04a1564b6042f0e092?src=pr&el=desc) will **increase** coverage by `0.1%`. > The diff coverage is `100%`. dgasmith: Cool! I will think a bit about how to observe constant expressions that are "future" in the path. Standard graph techniques could do this trivially, but seems like a lot of code when we can probably just use a few loops for it. Also in the first example I think the constants `kwarg` should be `constants=[2, 3, 4]`? jcmgray: > Cool! I will think a bit about how to observe constant expressions that are "future" in the path. Standard graph techniques could do this trivially, but seems like a lot of code when we can probably just use a few loops for it. Yeah I haven't thought too much about these 'future' constant operations yet, but suspect they are quite rare. One thing that would make addressing this easier would be to store the tensors not in a ``list`` but in a ``dict`` so that they are identifiable by more than just their position (which changes, meaning its hard to swap the order of operations). This kind of thing might also e.g. allow caching in the ``'optimal'`` algorithm and also reduce the cost of updating potential contractions in ``greedy``. Quite a big backend change though! And probably not necessary for this PR. > Also in the first example I think the constants ``kwarg`` should be ``constants=[2, 3, 4]``? The supplied ops are ``shape, array, array, array, shape`` so ``[1, 2, 3]`` is correct (if we are using 0-indexing). A point of confusion might be that the ``repr`` has all the constants moved to the end? 
: ``` 'ij,jk,kl,lm,mn->ni' -> 'ij,mn,[jk,kl,lm]->ni' ``` jcmgray: Yes think I'll leave future constants for moment, as like you say, a more graph like structure will make them easier to implement. Here's some themes and variations on potential representations: ```python # 1 - nothing "ContractExpression('ij,jk,kl,lm,mn->ni', constants=(1, 2, 4))" # 2 - individually wrapped "ContractExpression('ij,[jk,kl],lm,[mn]->ni', constants=(1, 2, 4))" # 3 - Moved to end "ContractExpression('ij,lm[jk,kl,mn]->ni')" # 4 - explicitly mentioned in contraction string #1 "ContractExpression('ij,lm->ni [Constant: jk,kl,mn]')" # 5 - explicitly mentioned in contraction string #2 "ContractExpression('ij,lm,[Const:jk,kl,mn]->ni')" # 6 - explicitly mentioned in contraction string #3 "ContractExpression('ij,lm,Const[jk,kl,mn]->ni')" ``` I'd favour 2 or maybe 5. Its certainly nice to keep the them in order, apart from making it less clear how many arguments the expression should now be called with (realistically if the user has created the expression they probably know though!). > Since we are moving around the order we may have to do something like Input: A_ij,B_[jk],C_[kl],D_[lm],E_mn->ni. Need to think about this more. My 2c on introducing new letters is that might be a bit confusing, especially as the indices themselves could potentially be any upper or lower case characters. --- I've also made a few tweaks to the backend stuff (and also will submit another PR later with support for torch and eager-mode tensorflow) but so far the performance gains from compiling these expressions with constants is pretty good, especially with a GPU! > On switch to a dictionary it is certainly something to think about it. It would be nice to remove all of the indexing that we have floating around. I think you already switched the greedy algorithm to a dynamic approach to reduce the cost, not sure how it would directly effect optimal. 
It is certainly a point that we could do some sort of memoization on the contraction finder. Yeah the logic for a dict in the optimal case is just that all tensors and contraction costs are uniquely defined by their indices (up to permutations), so a lot of different results could be reused regardless of however factorially many paths reach a certain point. dgasmith: I guess my main concern is what if someone writes the following: ``` >>> contract_expression("ij,ij,ij,ij->, *ops, constants=[1, 3]) "ContractExpression('ij,ij,[Const:ij,ij]->')" ``` It is an edge case, but I think the loss of information there is pretty rough. All things considered I would vote for #2, I believe it is quite a bit more elegant than my try. -- Great, I had been wondering about JIT for these expressions for a bit now. One thing to consider is `numba` or `numexpr` as well. jcmgray: Yes that is a good edge case to point out! I've gone with number 2 as it does seem to be a good tradeoff. I've also had a first run at some documentation. Let me know if anything could be clearer. Was also thinking about adding this and ``opt_einsum`` 's other features to a kind of 'highlights' list on the main readme. Thoughts? ---- > Great, I had been wondering about JIT for these expressions for a bit now. One thing to consider is numba or numexpr as well. Yes - a recent ``numba`` release actually added support for proper n-dimensional reshaping and transposing of arrays which should mean a numba tensordot implementation is possible. dgasmith: Adding to the readme and docs landing page would be good. However, I do worry a bit about overall length and the amount of content that we throw at people. I think these pages could be ~20-30% larger without too much issue. More than that is fine, but we would need to cut down on basic `opt_einsum` info in that case to keep the overall length rather short. Overall this is looking great and is pretty close to being ready to go IMO.
diff --git a/docs/source/backends.rst b/docs/source/backends.rst index 11efc96..9f05977 100644 --- a/docs/source/backends.rst +++ b/docs/source/backends.rst @@ -137,7 +137,7 @@ If ``theano`` is installed, using it as backend is as simple as specifiying >>> shapes = (3, 200), (200, 300), (300, 4) >>> expr = oe.contract_expression("ab,bc,cd", *shapes) >>> expr - ContractExpression('ab,bc,cd') + <ContractExpression('ab,bc,cd')> >>> import numpy as np >>> # GPU advantage mainly for low precision numbers diff --git a/docs/source/reusing_paths.rst b/docs/source/reusing_paths.rst index 27a37ff..d920d4a 100644 --- a/docs/source/reusing_paths.rst +++ b/docs/source/reusing_paths.rst @@ -8,7 +8,7 @@ If you expect to repeatedly use a particular contraction it can make things simp >>> my_expr = oe.contract_expression("abc,cd,dbe->ea", (2, 3, 4), (4, 5), (5, 3, 6)) >>> print(my_expr) - <ContractExpression> for 'abc,cd,dbe->ea': + <ContractExpression('abc,cd,dbe->ea')> 1. 'dbe,cd->bce' [GEMM] 2. 'bce,abc->ea' [GEMM] @@ -26,3 +26,109 @@ The ``ContractExpression`` can be called with 3 arrays that match the original s [ 3.67772272, 5.46727192]]) Note that few checks are performed when calling the expression, and while it will work for a set of arrays with the same ranks as the original shapes but differing sizes, it might no longer be optimal. + + +==================== +Specifying Constants +==================== + +Often one generates contraction expressions where some of the tensor arguments +will remain *constant* across many calls. +:func:`~opt_einsum.contract_expression` allows you to specify the indices of +these constant arguments, allowing ``opt_einsum`` to build and then reuse as +many constant contractions as possible. Take for example the equation: + +.. code:: python + + >>> eq = "ij,jk,kl,lm,mn->ni" + +where we know that *only* the first and last tensors will vary between calls. 
+We can specify this by marking the middle three as constant - we then need to +supply the actual arrays rather than just the shapes to +:func:`~opt_einsum.contract_expression`: + +.. code:: python + + >>> # A B C D E + >>> shapes = [(9, 5), (5, 5), (5, 5), (5, 5), (5, 8)] + + >>> # mark the middle three arrays as constant + >>> constants = [1, 2, 3] + + >>> # generate the constant arrays + >>> B, C, D = [np.random.randn(*shapes[i]) for i in constants] + + >>> # supplied ops are now mix of shapes and arrays + >>> ops = (9, 5), B, C, D, (5, 8) + + >>> expr = oe.contract_expression(eq, *ops, constants=constants) + >>> expr + <ContractExpression('ij,[jk,kl,lm],mn->ni', constants=[1, 2, 3])> + +The expression now only takes the remaining two arrays as arguments (the +tensors with ``'ij'`` and ``'mn'`` indices), and will store as many resuable +constant contractions as possible. + +.. code:: python + + >>> A1, E1 = np.random.rand(*shapes[0]), np.random.rand(*shapes[-1]) + >>> out1 = expr(A1, E1) + >>> out1.shap + (8, 9) + + >>> A2, E2 = np.random.rand(*shapes[0]), np.random.rand(*shapes[-1]) + >>> out2 = expr(A2, E2) + >>> out2.shape + (8, 9) + + >>> np.allclose(out1, out2) + False + + >>> print(expr) + <ContractExpression('ij,[jk,kl,lm],mn->ni', constants=[1, 2, 3])> + 1. 'jm,mn->jn' [GEMM] + 2. 'jn,ij->ni' [GEMM] + +Where we can see that the expression now only has to perform +two contractions to compute the output. + +.. note:: + + The constant part of an expression is lazily generated upon first call, + (specific a particular backend) though it can be explicitly built with call + to :meth:`~opt_einsum.contract.ContractExpression.evaluate_constants`. + +We can confirm the advantage of using expressions and constants by timing the +following scenarios, first setting +``A = np.random.rand(*shapes[0])`` and ``E = np.random.rand(*shapes[-1])``. + +- **contract from scratch:** + +.. 
code:: python + + >>> %timeit oe.contract(eq, A, B, C, D, E) + 239 µs ± 5.06 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) + +- **contraction with an expression but no constants:** + +.. code:: python + + >>> expr_no_consts = oe.contract_expression(eq, *shapes) + >>> %timeit expr_no_consts(A, B, C, D, E) + 76.7 µs ± 2.47 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) + +- **contraction with an expression and constants marked:** + +.. code:: python + + >>> %timeit expr(A, E) + 40.8 µs ± 1.22 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) + +Although this gives us a rough idea, of course the efficiency savings are +hugely dependent on the size of the contraction and number of possible constant +contractions. + +We also note that even if there are *no* constant contractions to perform, it +can be very advantageous to specify constant tensors for particular backends. +For instance, if a GPU backend is used, the constant tensors will be kept on +the device rather than being transfered each time. diff --git a/opt_einsum/backends.py b/opt_einsum/backends.py index 2a08371..e133d6a 100644 --- a/opt_einsum/backends.py +++ b/opt_einsum/backends.py @@ -62,72 +62,130 @@ def has_einsum(backend): # Tensorflow -def convert_arrays_to_tensorflow(arrays): - """Convert numpy arrays to ``tensorflow.placeholder`` instances. +def to_tensorflow(array, constant=False): + """Convert a numpy array to a ``tensorflow.placeholder`` instance. """ import tensorflow - return [tensorflow.placeholder(x.dtype, x.shape) for x in arrays] + + if isinstance(array, numpy.ndarray): + if constant: + return tensorflow.constant(array, array.dtype, array.shape) + + return tensorflow.placeholder(array.dtype, array.shape) + + return array def build_tensorflow_expression(arrays, expr): """Build a tensorflow function based on ``arrays`` and ``expr``. 
""" import tensorflow - placeholders = convert_arrays_to_tensorflow(arrays) - graph = expr._normal_contract(placeholders, backend='tensorflow') + + placeholders = [to_tensorflow(array) for array in arrays] + graph = expr._contract(placeholders, backend='tensorflow') def tensorflow_contract(*arrays): session = tensorflow.get_default_session() - return session.run(graph, feed_dict=dict(zip(placeholders, arrays))) + # only want to feed placeholders - constant tensors already have values + feed_dict = {p: a for p, a in zip(placeholders, arrays) if p.op.type == 'Placeholder'} + return session.run(graph, feed_dict=feed_dict) return tensorflow_contract +def evaluate_constants_tensorflow(const_arrays, expr): + """Convert constant arguments to tensorflow constants, and perform any + possible constant contractions. Requires evaluating a tensorflow graph. + """ + import tensorflow + + # compute the partial graph of new inputs + const_arrays = [to_tensorflow(x, constant=True) for x in const_arrays] + new_ops, new_contraction_list = expr(*const_arrays, backend='tensorflow', evaluate_constants=True) + + # evaluate the new inputs and convert to tensorflow constants + session = tensorflow.get_default_session() + new_ops = [None if x is None else to_tensorflow(session.run(x), constant=True) for x in new_ops] + + return new_ops, new_contraction_list + + # Theano -def convert_arrays_to_theano(arrays): - """Convert numpy arrays to ``theano.tensor.TensorType`` instances. +def to_theano(array, constant=False): + """Convert a numpy array to ``theano.tensor.TensorType`` instance. 
""" import theano - return [theano.tensor.TensorType(dtype=x.dtype, broadcastable=[False] * len(x.shape))() for x in arrays] + + if isinstance(array, numpy.ndarray): + if constant: + return theano.tensor.constant(array) + + return theano.tensor.TensorType(dtype=array.dtype, broadcastable=[False] * len(array.shape))() + + return array def build_theano_expression(arrays, expr): """Build a theano function based on ``arrays`` and ``expr``. """ import theano - in_vars = convert_arrays_to_theano(arrays) - out_var = expr._normal_contract(in_vars, backend='theano') - graph = theano.function(in_vars, out_var) + + in_vars = [to_theano(array) for array in arrays] + out_var = expr._contract(in_vars, backend='theano') + + # don't supply constants to graph + graph_ins = [x for x in in_vars if not isinstance(x, theano.tensor.TensorConstant)] + graph = theano.function(graph_ins, out_var) def theano_contract(*arrays): - return graph(*arrays) + return graph(*[x for x in arrays if not isinstance(x, theano.tensor.TensorConstant)]) return theano_contract +def evaluate_constants_theano(const_arrays, expr): + # compute the partial graph of new inputs + const_arrays = [to_theano(x, constant=True) for x in const_arrays] + new_ops, new_contraction_list = expr(*const_arrays, backend='theano', evaluate_constants=True) + + # evaluate the new inputs and convert to theano shared tensors + new_ops = [None if x is None else to_theano(x.eval(), constant=True) for x in new_ops] + + return new_ops, new_contraction_list + + # Cupy -def convert_arrays_to_cupy(arrays): # pragma: no cover - """Convert numpy arrays to ``cupy.ndarray`` instances. - """ +def to_cupy(array): # pragma: no cover import cupy - return [cupy.asarray(x) for x in arrays] + + if isinstance(array, numpy.ndarray): + return cupy.asarray(array) + + return array def build_cupy_expression(_, expr): # pragma: no cover """Build a cupy function based on ``arrays`` and ``expr``. 
""" - import cupy def cupy_contract(*arrays): - cupy_arrays = convert_arrays_to_cupy(arrays) - cupy_out = expr._normal_contract(cupy_arrays, backend='cupy') - return cupy.asnumpy(cupy_out) + cupy_arrays = [to_cupy(x) for x in arrays] + cupy_out = expr._contract(cupy_arrays, backend='cupy') + return cupy_out.get() return cupy_contract +def evaluate_constants_cupy(const_arrays, expr): # pragma: no cover + """Convert constant arguments to cupy arrays, and perform any possible + constant contractions. + """ + const_arrays = [to_cupy(x) for x in const_arrays] + return expr(*const_arrays, backend='cupy', evaluate_constants=True) + + # Dispatch to correct expression backend # these are the backends which support explicit to-and-from numpy conversion CONVERT_BACKENDS = { @@ -137,8 +195,22 @@ CONVERT_BACKENDS = { } +PARSE_CONSTS_BACKENDS = { + 'tensorflow': evaluate_constants_tensorflow, + 'theano': evaluate_constants_theano, + 'cupy': evaluate_constants_cupy, +} + + def build_expression(backend, arrays, expr): """Build an expression, based on ``expr`` and initial arrays ``arrays``, that evaluates using backend ``backend``. """ return CONVERT_BACKENDS[backend](arrays, expr) + + +def evaluate_constants(backend, arrays, expr): + """Convert constant arrays to the correct backend, and perform as much of + the contraction of ``expr`` with these as possible. 
+ """ + return PARSE_CONSTS_BACKENDS[backend](arrays, expr) diff --git a/opt_einsum/contract.py b/opt_einsum/contract.py index 651600e..e1fc026 100644 --- a/opt_einsum/contract.py +++ b/opt_einsum/contract.py @@ -402,7 +402,8 @@ def contract(*operands, **kwargs): use_blas = kwargs.pop('use_blas', True) memory_limit = kwargs.pop('memory_limit', None) backend = kwargs.pop('backend', 'numpy') - gen_expression = kwargs.pop('gen_expression', False) + gen_expression = kwargs.pop('_gen_expression', False) + constants_dict = kwargs.pop('_constants_dict', {}) # Make sure remaining keywords are valid for einsum unknown_kwargs = [k for (k, v) in kwargs.items() if k not in valid_einsum_kwargs] @@ -418,17 +419,17 @@ def contract(*operands, **kwargs): # check if performing contraction or just building expression if gen_expression: - return ContractExpression(full_str, contraction_list, **einsum_kwargs) + return ContractExpression(full_str, contraction_list, constants_dict, **einsum_kwargs) return _core_contract(operands, contraction_list, backend=backend, **einsum_kwargs) -def _core_contract(operands, contraction_list, backend='numpy', **einsum_kwargs): +def _core_contract(operands, contraction_list, backend='numpy', evaluate_constants=False, **einsum_kwargs): """Inner loop used to perform an actual contraction given the output from a ``contract_path(..., einsum_call=True)`` call. 
""" - # Special handeling if out is specified + # Special handling if out is specified out_array = einsum_kwargs.pop('out', None) specified_out = out_array is not None @@ -438,9 +439,13 @@ def _core_contract(operands, contraction_list, backend='numpy', **einsum_kwargs) # Start contraction loop for num, contraction in enumerate(contraction_list): inds, idx_rm, einsum_str, remaining, blas_flag = contraction - tmp_operands = [] - for x in inds: - tmp_operands.append(operands.pop(x)) + + # check if we are performing the pre-pass of an expression with constants, + # if so, break out upon finding first non-constant (None) operand + if evaluate_constants and any(operands[x] is None for x in inds): + return operands, contraction_list[num:] + + tmp_operands = [operands.pop(x) for x in inds] # Do we need to deal with the output? handle_out = specified_out and ((num + 1) == len(contraction_list)) @@ -452,9 +457,7 @@ def _core_contract(operands, contraction_list, backend='numpy', **einsum_kwargs) input_str, results_index = einsum_str.split('->') input_left, input_right = input_str.split(',') - tensor_result = input_left + input_right - for s in idx_rm: - tensor_result = tensor_result.replace(s, "") + tensor_result = "".join(s for s in input_left + input_right if s not in idx_rm) # Find indices to contract over left_pos, right_pos = [], [] @@ -483,7 +486,7 @@ def _core_contract(operands, contraction_list, backend='numpy', **einsum_kwargs) # Do the contraction new_view = _einsum(einsum_str, *tmp_operands, backend=backend, **einsum_kwargs) - # Append new items and derefernce what we can + # Append new items and dereference what we can operands.append(new_view) del tmp_operands, new_view @@ -493,35 +496,109 @@ def _core_contract(operands, contraction_list, backend='numpy', **einsum_kwargs) return operands[0] +def format_const_einsum_str(einsum_str, constants): + """Add brackets to the constant terms in ``einsum_str``. 
For example: + + >>> format_const_einsum_str('ab,bc,cd->ad', [0, 2]) + 'bc,[ab,cd]->ad' + + No-op if there are no constants. + """ + if not constants: + return einsum_str + + if "->" in einsum_str: + lhs, rhs = einsum_str.split('->') + arrow = "->" + else: + lhs, rhs, arrow = einsum_str, "", "" + + wrapped_terms = ["[{}]".format(t) if i in constants else t for i, t in enumerate(lhs.split(','))] + + formatted_einsum_str = "{}{}{}".format(','.join(wrapped_terms), arrow, rhs) + + # merge adjacent constants + formatted_einsum_str = formatted_einsum_str.replace("],[", ',') + return formatted_einsum_str + + class ContractExpression: """Helper class for storing an explicit ``contraction_list`` which can then be repeatedly called solely with the array arguments. """ - def __init__(self, contraction, contraction_list, **einsum_kwargs): - self.contraction = contraction + def __init__(self, contraction, contraction_list, constants_dict, **einsum_kwargs): self.contraction_list = contraction_list self.einsum_kwargs = einsum_kwargs - self.num_args = len(contraction.split('->')[0].split(',')) + self.contraction = format_const_einsum_str(contraction, constants_dict.keys()) + + # need to know _full_num_args to parse constants with, and num_args to call with + self._full_num_args = contraction.count(',') + 1 + self.num_args = self._full_num_args - len(constants_dict) + + # likewise need to know full contraction list + self._full_contraction_list = contraction_list - def _normal_contract(self, arrays, out=None, backend='numpy'): + self._constants_dict = constants_dict + self._evaluated_constants = {} + self._backend_expressions = {} + + def evaluate_constants(self, backend='numpy'): + """Convert any constant operands to the correct backend form, and + perform as many contractions as possible to create a new list of + operands, stored in ``self._evaluated_constants[backend]``. This also + makes sure ``self.contraction_list`` only contains the remaining, + non-const operations. 
+ """ + # prepare a list of operands, with `None` for non-consts + tmp_const_ops = [self._constants_dict.get(i, None) for i in range(self._full_num_args)] + + # get the new list of operands with constant operations performed, and remaining contractions + new_ops, new_contraction_list = self(*tmp_const_ops, backend=backend, evaluate_constants=True) + self._evaluated_constants[backend] = new_ops + self.contraction_list = new_contraction_list + + def _get_evaluated_constants(self, backend): + """Retrieve or generate the cached list of constant operators (mixed + in with None representing non-consts) and the remaining contraction + list. + """ + try: + return self._evaluated_constants[backend] + except KeyError: + self.evaluate_constants(backend) + return self._evaluated_constants[backend] + + def _get_backend_expression(self, arrays, backend): + try: + return self._backend_expressions[backend] + except KeyError: + fn = backends.build_expression(backend, arrays, self) + self._backend_expressions[backend] = fn + return fn + + def _contract(self, arrays, out=None, backend='numpy', evaluate_constants=False): """The normal, core contraction. """ - return _core_contract(list(arrays), self.contraction_list, out=out, - backend=backend, **self.einsum_kwargs) + contraction_list = self._full_contraction_list if evaluate_constants else self.contraction_list + + return _core_contract(list(arrays), contraction_list, out=out, backend=backend, + evaluate_constants=evaluate_constants, **self.einsum_kwargs) - def _convert_contract(self, arrays, out, backend): + def _contract_with_conversion(self, arrays, out, backend, evaluate_constants=False): """Special contraction, i.e. contraction with a different backend - but converting to and from that backend. Checks for - ``self._{backend}_contract``, generates it if is missing, then calls it + but converting to and from that backend. Retrieves or generates a + cached expression using ``arrays`` as templates, then calls it with ``arrays``. 
- """ - convert_fn = "_{}_contract".format(backend) - if not hasattr(self, convert_fn): - setattr(self, convert_fn, backends.build_expression(backend, arrays, self)) + If ``evaluate_constants=True``, perform a partial contraction that + prepares the constant tensors and operations with the right backend. + """ + # convert consts to correct type & find reduced contraction list + if evaluate_constants: + return backends.evaluate_constants(backend, arrays, self) - result = getattr(self, convert_fn)(*arrays) + result = self._get_backend_expression(arrays, backend)(*arrays) if out is not None: out[()] = result @@ -543,24 +620,34 @@ class ContractExpression: are supplied then try to convert them to and from the correct backend array type. """ - - if len(arrays) != self.num_args: - raise ValueError("This `ContractExpression` takes exactly %s array arguments " - "but received %s." % (self.num_args, len(arrays))) - - backend = kwargs.pop('backend', 'numpy') out = kwargs.pop('out', None) + backend = kwargs.pop('backend', 'numpy') + evaluate_constants = kwargs.pop('evaluate_constants', False) + if kwargs: raise ValueError("The only valid keyword arguments to a `ContractExpression` " "call are `out=` or `backend=`. Got: %s." % kwargs) + correct_num_args = self._full_num_args if evaluate_constants else self.num_args + + if len(arrays) != correct_num_args: + raise ValueError("This `ContractExpression` takes exactly %s array arguments " + "but received %s." 
% (self.num_args, len(arrays))) + + if self._constants_dict and not evaluate_constants: + # fill in the missing non-constant terms with newly supplied arrays + ops_var, ops_const = iter(arrays), self._get_evaluated_constants(backend) + ops = [next(ops_var) if op is None else op for op in ops_const] + else: + ops = arrays + try: # Check if the backend requires special preparation / calling # but also ignore non-numpy arrays -> assume user wants same type back - if backend in backends.CONVERT_BACKENDS and isinstance(arrays[0], np.ndarray): - return self._convert_contract(arrays, out, backend) + if backend in backends.CONVERT_BACKENDS and any(isinstance(x, np.ndarray) for x in arrays): + return self._contract_with_conversion(ops, out, backend, evaluate_constants=evaluate_constants) - return self._normal_contract(arrays, out, backend) + return self._contract(ops, out, backend, evaluate_constants=evaluate_constants) except ValueError as err: original_msg = str(err.args) if err.args else "" @@ -571,10 +658,14 @@ class ContractExpression: raise def __repr__(self): - return "ContractExpression('%s')" % self.contraction + if self._constants_dict: + constants_repr = ", constants={}".format(sorted(self._constants_dict)) + else: + constants_repr = "" + return "<ContractExpression('{}'{})>".format(self.contraction, constants_repr) def __str__(self): - s = "<ContractExpression> for '%s':" % self.contraction + s = self.__repr__() for i, c in enumerate(self.contraction_list): s += "\n %i. " % (i + 1) s += "'%s'" % c[2] + (" [%s]" % c[-1] if c[-1] else "") @@ -602,6 +693,13 @@ def contract_expression(subscripts, *shapes, **kwargs): Specifies the subscripts for summation. shapes : sequence of integer tuples Shapes of the arrays to optimize the contraction for. + constants : sequence of int, optional + The indices of any constant arguments in ``shapes``, in which case the + actual array should be supplied at that position rather than just a + shape. 
If these are specified, then constant parts of the contraction + between calls will be reused. Additionally, if a gpu-enabled backend is + used for example, then the constant tensors will be kept on the gpu, + minimizing transfers. kwargs : Passed on to ``contract_path`` or ``einsum``. See ``contract``. @@ -621,15 +719,29 @@ def contract_expression(subscripts, *shapes, **kwargs): - The generated expression will work with any arrays which have the same rank (number of dimensions) as the original shapes, however, if the actual sizes are different, the expression may no longer be optimal. + - Constant operations will be computed upon first call with a particular + backend, then subsequently reused. Examples -------- - >>> expr = contract_expression("ab,bc->ac", (3, 4), (4, 5)) - >>> a, b = np.random.rand(3, 4), np.random.rand(4, 5) - >>> c = expr(a, b) - >>> np.allclose(c, a @ b) - True + Basic usage: + + >>> expr = contract_expression("ab,bc->ac", (3, 4), (4, 5)) + >>> a, b = np.random.rand(3, 4), np.random.rand(4, 5) + >>> c = expr(a, b) + >>> np.allclose(c, a @ b) + True + + Supply ``a`` as a constant: + + >>> expr = contract_expression("ab,bc->ac", a, (4, 5), constants=[0]) + >>> expr + <ContractExpression('[ab],bc->ac', constants=[0])> + + >>> c = expr(b) + >>> np.allclose(c, a @ b) + True """ if not kwargs.get('optimize', True): @@ -640,6 +752,14 @@ def contract_expression(subscripts, *shapes, **kwargs): raise ValueError("'%s' should only be specified when calling a " "`ContractExpression`, not when building it." 
% arg) - dummy_arrays = [_ShapeOnly(s) for s in shapes] + kwargs['_gen_expression'] = True + + # build dict of constant indices mapped to arrays + constants = kwargs.pop('constants', ()) + constants_dict = {i: shapes[i] for i in constants} + kwargs['_constants_dict'] = constants_dict + + # apart from constant arguments, make dummy arrays + dummy_arrays = [s if i in constants else _ShapeOnly(s) for i, s in enumerate(shapes)] - return contract(subscripts, *dummy_arrays, gen_expression=True, **kwargs) + return contract(subscripts, *dummy_arrays, **kwargs)
Optimal path with constant arguments I have an application where there is a large tensor contraction in the inner loop, with some arrays that are fixed and others that are updated between iterations. Operations only involving the constant terms could done ahead of time (outside the loop), but this may or may not actually improve performance. It could be nice if opt-einsum had a way to mark some arguments as constant, in which case the cost of contracting them them could neglected when computing the cost of paths only involving these arguments.
dgasmith/opt_einsum
diff --git a/opt_einsum/tests/test_backends.py b/opt_einsum/tests/test_backends.py index b2e43ba..19d72c2 100644 --- a/opt_einsum/tests/test_backends.py +++ b/opt_einsum/tests/test_backends.py @@ -6,6 +6,7 @@ from opt_einsum import contract, helpers, contract_expression, backends try: import tensorflow as tf found_tensorflow = True + sess = tf.Session() except ImportError: found_tensorflow = False @@ -57,52 +58,131 @@ def test_tensorflow(string): shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) - sess = tf.Session() with sess.as_default(): expr(*views, backend='tensorflow', out=opt) assert np.allclose(ein, opt) # test non-conversion mode - tensorflow_views = backends.convert_arrays_to_tensorflow(views) + tensorflow_views = [backends.to_tensorflow(view) for view in views] expr(*tensorflow_views, backend='tensorflow') [email protected](not found_cupy, reason="Cupy not installed.") [email protected](not found_tensorflow, reason="Tensorflow not installed.") +def test_tensorflow_with_constants(): + eq = 'ij,jk,kl->li' + shapes = (2, 3), (3, 4), (4, 5) + constants = {0, 2} + ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)] + var = np.random.rand(*shapes[1]) + + res_exp = contract(eq, ops[0], var, ops[2]) + + expr = contract_expression(eq, *ops, constants=constants) + + # check tensorflow + with sess.as_default(): + res_got = expr(var, backend='tensorflow') + assert 'tensorflow' in expr._evaluated_constants + assert np.allclose(res_exp, res_got) + + # check can call with numpy still + res_got2 = expr(var, backend='numpy') + assert np.allclose(res_exp, res_got2) + + # check tensorflow call returns tensorflow still + res_got3 = expr(backends.to_tensorflow(var), backend='tensorflow') + assert isinstance(res_got3, tf.Tensor) + + [email protected](not found_theano, reason="Theano not installed.") @pytest.mark.parametrize("string", tests) -def test_cupy(string): # pragma: no cover +def 
test_theano(string): views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) - opt = expr(*views, backend='cupy') + opt = expr(*views, backend='theano') assert np.allclose(ein, opt) # test non-conversion mode - cupy_views = backends.convert_arrays_to_cupy(views) - cupy_opt = expr(*cupy_views, backend='cupy') - assert isinstance(cupy_opt, cupy.ndarray) - assert np.allclose(ein, cupy.asnumpy(cupy_opt)) + theano_views = [backends.to_theano(view) for view in views] + theano_opt = expr(*theano_views, backend='theano') + assert isinstance(theano_opt, theano.tensor.TensorVariable) [email protected](not found_theano, reason="Theano not installed.") [email protected](not found_theano, reason="theano not installed.") +def test_theano_with_constants(): + eq = 'ij,jk,kl->li' + shapes = (2, 3), (3, 4), (4, 5) + constants = {0, 2} + ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)] + var = np.random.rand(*shapes[1]) + + res_exp = contract(eq, ops[0], var, ops[2]) + + expr = contract_expression(eq, *ops, constants=constants) + + # check theano + res_got = expr(var, backend='theano') + assert 'theano' in expr._evaluated_constants + assert np.allclose(res_exp, res_got) + + # check can call with numpy still + res_got2 = expr(var, backend='numpy') + assert np.allclose(res_exp, res_got2) + + # check theano call returns theano still + res_got3 = expr(backends.to_theano(var), backend='theano') + assert isinstance(res_got3, theano.tensor.TensorVariable) + + [email protected](not found_cupy, reason="Cupy not installed.") @pytest.mark.parametrize("string", tests) -def test_theano(string): +def test_cupy(string): # pragma: no cover views = helpers.build_views(string) ein = contract(string, *views, optimize=False, use_blas=False) shps = [v.shape for v in views] expr = contract_expression(string, *shps, optimize=True) - opt = 
expr(*views, backend='theano') + opt = expr(*views, backend='cupy') assert np.allclose(ein, opt) # test non-conversion mode - theano_views = backends.convert_arrays_to_theano(views) - theano_opt = expr(*theano_views, backend='theano') - assert isinstance(theano_opt, theano.tensor.TensorVariable) + cupy_views = [backends.to_cupy(view) for view in views] + cupy_opt = expr(*cupy_views, backend='cupy') + assert isinstance(cupy_opt, cupy.ndarray) + assert np.allclose(ein, cupy.asnumpy(cupy_opt)) + + [email protected](not found_cupy, reason="Cupy not installed.") +def test_cupy_with_constants(): + eq = 'ij,jk,kl->li' + shapes = (2, 3), (3, 4), (4, 5) + constants = {0, 2} + ops = [np.random.rand(*shp) if i in constants else shp for i, shp in enumerate(shapes)] + var = np.random.rand(*shapes[1]) + + res_exp = contract(eq, ops[0], var, ops[2]) + + expr = contract_expression(eq, *ops, constants=constants) + + # check cupy + res_got = expr(var, backend='cupy') + assert 'cupy' in expr._evaluated_constants + assert np.allclose(res_exp, res_got) + + # check can call with numpy still + res_got2 = expr(var, backend='numpy') + assert np.allclose(res_exp, res_got2) + + # check cupy call returns cupy still + res_got3 = expr(cupy.asarray(var), backend='cupy') + assert isinstance(res_got3, cupy.ndarray) + assert np.allclose(res_exp, res_got3.get()) @pytest.mark.skipif(not found_dask, reason="Dask not installed.") diff --git a/opt_einsum/tests/test_contract.py b/opt_einsum/tests/test_contract.py index e9c84ce..c439326 100644 --- a/opt_einsum/tests/test_contract.py +++ b/opt_einsum/tests/test_contract.py @@ -205,3 +205,31 @@ def test_contract_expressions(string, optimize, use_blas, out_spec): assert string in expr.__repr__() assert string in expr.__str__() + [email protected]("string,constants", [ + ('hbc,bdef,cdkj,ji,ikeh,lfo', [1, 2, 3, 4]), + ('bdef,cdkj,ji,ikeh,hbc,lfo', [0, 1, 2, 3]), + ('hbc,bdef,cdkj,ji,ikeh,lfo', [1, 2, 3, 4]), + ('hbc,bdef,cdkj,ji,ikeh,lfo', [1, 2, 3, 4]), + 
('ijab,acd,bce,df,ef->ji', [1, 2, 3, 4]), + ('ab,cd,ad,cb', [1, 3]), + ('ab,bc,cd', [0, 1]), +]) +def test_contract_expression_with_constants(string, constants): + views = helpers.build_views(string) + expected = contract(string, *views, optimize=False, use_blas=False) + + shapes = [view.shape for view in views] + + expr_args = [] + ctrc_args = [] + for i, (shape, view) in enumerate(zip(shapes, views)): + if i in constants: + expr_args.append(view) + else: + expr_args.append(shape) + ctrc_args.append(view) + + expr = contract_expression(string, *expr_args, constants=constants) + out = expr(*ctrc_args) + assert np.allclose(expected, out)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 4 }
2.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 importlib-metadata==4.8.3 iniconfig==1.1.1 numpy==1.19.5 -e git+https://github.com/dgasmith/opt_einsum.git@45eb0220dc00186c0dfd0de27e6dd725a8d6e252#egg=opt_einsum packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: opt_einsum channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - numpy==1.19.5 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/opt_einsum
[ "opt_einsum/tests/test_contract.py::test_contract_expression_with_constants[hbc,bdef,cdkj,ji,ikeh,lfo-constants0]", "opt_einsum/tests/test_contract.py::test_contract_expression_with_constants[bdef,cdkj,ji,ikeh,hbc,lfo-constants1]", "opt_einsum/tests/test_contract.py::test_contract_expression_with_constants[hbc,bdef,cdkj,ji,ikeh,lfo-constants2]", "opt_einsum/tests/test_contract.py::test_contract_expression_with_constants[hbc,bdef,cdkj,ji,ikeh,lfo-constants3]", "opt_einsum/tests/test_contract.py::test_contract_expression_with_constants[ijab,acd,bce,df,ef->ji-constants4]", "opt_einsum/tests/test_contract.py::test_contract_expression_with_constants[ab,cd,ad,cb-constants5]", "opt_einsum/tests/test_contract.py::test_contract_expression_with_constants[ab,bc,cd-constants6]" ]
[]
[ "opt_einsum/tests/test_contract.py::test_compare[a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_compare[a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_compare[ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_compare[ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_compare[abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_compare[acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_compare[acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_compare[cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_compare[abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_compare[bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_compare[chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_compare[chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_compare[bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_compare[ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_compare[ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_compare[ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_compare[ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_compare[ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_compare[ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_compare[ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_compare[ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_compare[ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_compare[ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_compare[ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_compare[ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_compare[eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_compare[dd,fb,be,cdb->cef]", 
"opt_einsum/tests/test_contract.py::test_compare[bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_compare[dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_compare[fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_compare[abcd,ad]", "opt_einsum/tests/test_contract.py::test_compare[ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_compare[baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_compare[bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_compare[fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_compare[efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_compare[ab,ab]", "opt_einsum/tests/test_contract.py::test_compare[ab,ba]", "opt_einsum/tests/test_contract.py::test_compare[abc,abc]", "opt_einsum/tests/test_contract.py::test_compare[abc,bac]", "opt_einsum/tests/test_contract.py::test_compare[abc,cba]", "opt_einsum/tests/test_contract.py::test_compare[ab,bc]", "opt_einsum/tests/test_contract.py::test_compare[ab,cb]", "opt_einsum/tests/test_contract.py::test_compare[ba,bc]", "opt_einsum/tests/test_contract.py::test_compare[ba,cb]", "opt_einsum/tests/test_contract.py::test_compare[abcd,cd]", "opt_einsum/tests/test_contract.py::test_compare[abcd,ab]", "opt_einsum/tests/test_contract.py::test_compare[abcd,cdef]", "opt_einsum/tests/test_contract.py::test_compare[abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_compare[abcd,efdc]", "opt_einsum/tests/test_contract.py::test_compare[aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_compare[ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare[aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare[baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare[aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_compare[aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_compare[ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_compare[bcf,bbb,fbf,fc->]", 
"opt_einsum/tests/test_contract.py::test_compare[bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_compare[bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_compare[fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_compare[afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_compare[adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_compare[bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_compare[dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_compare[aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,ab,cd,cd->]", 
"opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abcd,ad]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,ab]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,ba]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abc,abc]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abc,bac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abc,cba]", 
"opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,bc]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,cb]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ba,bc]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ba,cb]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abcd,cd]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abcd,ab]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abcd,cdef]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[abcd,efdc]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_drop_in_replacement[aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_compare_greek[a,ab,abc->abc]", 
"opt_einsum/tests/test_contract.py::test_compare_greek[a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_compare_greek[ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_greek[ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_greek[abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_greek[acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_compare_greek[acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_compare_greek[cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_compare_greek[abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_compare_greek[bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_compare_greek[chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_compare_greek[chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_compare_greek[bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_compare_greek[eb,cb,fb->cef]", 
"opt_einsum/tests/test_contract.py::test_compare_greek[dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_compare_greek[bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_compare_greek[dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_compare_greek[fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_compare_greek[abcd,ad]", "opt_einsum/tests/test_contract.py::test_compare_greek[ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_compare_greek[baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_compare_greek[bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_compare_greek[fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_compare_greek[efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,ab]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,ba]", "opt_einsum/tests/test_contract.py::test_compare_greek[abc,abc]", "opt_einsum/tests/test_contract.py::test_compare_greek[abc,bac]", "opt_einsum/tests/test_contract.py::test_compare_greek[abc,cba]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,bc]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,cb]", "opt_einsum/tests/test_contract.py::test_compare_greek[ba,bc]", "opt_einsum/tests/test_contract.py::test_compare_greek[ba,cb]", "opt_einsum/tests/test_contract.py::test_compare_greek[abcd,cd]", "opt_einsum/tests/test_contract.py::test_compare_greek[abcd,ab]", "opt_einsum/tests/test_contract.py::test_compare_greek[abcd,cdef]", "opt_einsum/tests/test_contract.py::test_compare_greek[abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_compare_greek[abcd,efdc]", "opt_einsum/tests/test_contract.py::test_compare_greek[aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_compare_greek[ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare_greek[aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare_greek[baa,bcc->ac]", 
"opt_einsum/tests/test_contract.py::test_compare_greek[aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_compare_greek[aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_compare_greek[ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_compare_greek[bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_compare_greek[bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_compare_greek[bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_compare_greek[fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_compare_greek[afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_compare_greek[adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_compare_greek[bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_compare_greek[dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_compare_greek[aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_compare_blas[a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_compare_blas[a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_compare_blas[ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_blas[ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_blas[abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_blas[acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_compare_blas[acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_compare_blas[cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_compare_blas[abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_compare_blas[bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_compare_blas[chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_compare_blas[chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_compare_blas[bdhe,acad,hiab,agac,hibd]", 
"opt_einsum/tests/test_contract.py::test_compare_blas[ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_compare_blas[eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_compare_blas[dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_compare_blas[bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_compare_blas[dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_compare_blas[fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_compare_blas[abcd,ad]", "opt_einsum/tests/test_contract.py::test_compare_blas[ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_compare_blas[baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_compare_blas[bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_compare_blas[fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_compare_blas[efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,ab]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,ba]", "opt_einsum/tests/test_contract.py::test_compare_blas[abc,abc]", "opt_einsum/tests/test_contract.py::test_compare_blas[abc,bac]", 
"opt_einsum/tests/test_contract.py::test_compare_blas[abc,cba]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,bc]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,cb]", "opt_einsum/tests/test_contract.py::test_compare_blas[ba,bc]", "opt_einsum/tests/test_contract.py::test_compare_blas[ba,cb]", "opt_einsum/tests/test_contract.py::test_compare_blas[abcd,cd]", "opt_einsum/tests/test_contract.py::test_compare_blas[abcd,ab]", "opt_einsum/tests/test_contract.py::test_compare_blas[abcd,cdef]", "opt_einsum/tests/test_contract.py::test_compare_blas[abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_compare_blas[abcd,efdc]", "opt_einsum/tests/test_contract.py::test_compare_blas[aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas[ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas[aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas[baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas[aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas[aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_compare_blas[ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas[bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_compare_blas[bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_compare_blas[bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_compare_blas[fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_compare_blas[afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_compare_blas[adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_compare_blas[bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_compare_blas[dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_compare_blas[aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[a,b,ab->ab]", 
"opt_einsum/tests/test_contract.py::test_compare_blas_greek[ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,bcd,cd->abd]", 
"opt_einsum/tests/test_contract.py::test_compare_blas_greek[eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abcd,ad]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,ab]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,ba]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abc,abc]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abc,bac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abc,cba]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,bc]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,cb]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ba,bc]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ba,cb]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abcd,cd]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abcd,ab]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abcd,cdef]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[abcd,efdc]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[aab,bc->ac]", 
"opt_einsum/tests/test_contract.py::test_compare_blas_greek[ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_compare_blas_greek[aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_some_non_alphabet_maintains_order", "opt_einsum/tests/test_contract.py::test_printing", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,bcd,cd->abd]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abcd,ad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,ba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abc,abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abc,bac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abc,cba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ba,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ba,cb]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abcd,cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abcd,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abcd,cdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-abcd,efdc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-dba,ead,cad->bce]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-greedy-aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,ab,cd,cd->ac]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abcd,ad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-efc,dbc,acf,fd->abe]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,ba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abc,abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abc,bac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abc,cba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ba,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ba,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abcd,cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abcd,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abcd,cdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-abcd,efdc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-ecb,fef,bad,ed->ac]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-False-optimal-aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abhe,hidj,jgba,hiab,gab]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-bca,cdb,dbf,afc->]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abcd,ad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,ba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abc,abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abc,bac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abc,cba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ba,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ba,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abcd,cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abcd,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abcd,cdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abcd,cdef->feba]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-abcd,efdc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-greedy-aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ea,fb,gc,hd,abcd->efgh]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,cd,ef->acdf]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abcd,ad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,ba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abc,abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abc,bac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abc,cba]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ba,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ba,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abcd,cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abcd,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abcd,cdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-abcd,efdc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-fbb,dfd,fc,fc->]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_contract_expressions[False-True-optimal-aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-bdhe,acad,hiab,agac,hibd]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abcd,ad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-baa,dcf,af,cde->be]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,ba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abc,abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abc,bac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abc,cba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ba,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ba,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abcd,cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abcd,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abcd,cdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-abcd,efdc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-baa,bcc->ac]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-greedy-aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-eb,cb,fb->cef]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abcd,ad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,ba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abc,abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abc,bac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abc,cba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ba,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ba,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abcd,cd]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abcd,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abcd,cdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-abcd,efdc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-False-optimal-aef,fbc,dca->bde]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-chd,bde,agbc,hiad,bdi,cgh,agdb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,ab,cd,cd,ef,ef->]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abcd,ad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ed,fcd,ff,bcf->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,ba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abc,abc]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abc,bac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abc,cba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ba,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ba,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abcd,cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abcd,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abcd,cdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-abcd,efdc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-aab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-bcb,bb,fc,fff->]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-greedy-aef,fbc,dca->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-a,ab,abc->abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-a,b,ab->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ea,fb,gc,hd,abcd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ea,fb,abcd,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abcd,ea,fb,gc,hd->efgh]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-acdf,jbje,gihb,hfac,gfac,gifabc,hfac0]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-acdf,jbje,gihb,hfac,gfac,gifabc,hfac1]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-cd,bdhe,aidb,hgca,gc,hgibcd,hgac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abhe,hidj,jgba,hiab,gab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-bde,cdh,agdb,hica,ibd,hgicd,hiac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-chd,bde,agbc,hiad,hgc,hgi,hiad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-chd,bde,agbc,hiad,bdi,cgh,agdb]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-bdhe,acad,hiab,agac,hibd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,ab,c->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,ab,c->c]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,ab,cd,cd->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,ab,cd,cd->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,ab,cd,cd->cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,ab,cd,cd,ef,ef->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,cd,ef->abcdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,cd,ef->acdf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,cd,de->abcde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,cd,de->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,bcd,cd->abcd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,bcd,cd->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-eb,cb,fb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-dd,fb,be,cdb->cef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-bca,cdb,dbf,afc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-dcc,fce,ea,dbf->ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-fdf,cdd,ccd,afe->ae]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abcd,ad]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ed,fcd,ff,bcf->be]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-baa,dcf,af,cde->be]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-bd,db,eac->ace]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-fff,fae,bef,def->abd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-efc,dbc,acf,fd->abe]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,ba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abc,abc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abc,bac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abc,cba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ba,bc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ba,cb]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abcd,cd]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abcd,ab]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abcd,cdef]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abcd,cdef->feba]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-abcd,efdc]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-aab,bc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ab,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-aab,bcc->ac]", 
"opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-baa,bcc->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-aab,ccb->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-aab,fa,df,ecc->bde]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-ecb,fef,bad,ed->ac]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-bcf,bbb,fbf,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-bb,ff,be->e]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-bcb,bb,fc,fff->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-fbb,dfd,fc,fc->]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-afd,ba,cc,dc->bf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-adb,bc,fa,cfc->d]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-bbd,bda,fc,db->acf]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-dba,ead,cad->bce]", "opt_einsum/tests/test_contract.py::test_contract_expressions[True-True-optimal-aef,fbc,dca->bde]" ]
[]
MIT License
2,759
[ "opt_einsum/contract.py", "opt_einsum/backends.py", "docs/source/backends.rst", "docs/source/reusing_paths.rst" ]
[ "opt_einsum/contract.py", "opt_einsum/backends.py", "docs/source/backends.rst", "docs/source/reusing_paths.rst" ]
conan-io__conan-3192
0e1a608aa997760d0012027f4168e0017d63e62f
2018-07-10 20:45:00
b02cce4e78d5982e00b66f80a683465b3c679033
diff --git a/.gitignore b/.gitignore index ee466be1d..aa6c567e3 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,7 @@ htmlcov/ nosetests.xml coverage.xml *,cover +.pytest_cache/ # Translations *.mo diff --git a/conans/client/cmd/export.py b/conans/client/cmd/export.py index 5f508dab7..65e0d709d 100644 --- a/conans/client/cmd/export.py +++ b/conans/client/cmd/export.py @@ -76,15 +76,16 @@ def _capture_export_scm_data(conanfile, conanfile_dir, destination_folder, outpu scm = SCM(scm_data, conanfile_dir) if scm_data.url == "auto": - origin = scm.get_remote_url() + origin = scm.get_qualified_remote_url() if not origin: raise ConanException("Repo origin cannot be deduced by 'auto'") - if os.path.exists(origin): + if scm.is_local_repository(): output.warn("Repo origin looks like a local path: %s" % origin) - origin = origin.replace("\\", "/") output.success("Repo origin deduced by 'auto': %s" % origin) scm_data.url = origin if scm_data.revision == "auto": + if not scm.is_pristine(): + output.warn("Repo status is not pristine: there might be modified files") scm_data.revision = scm.get_revision() output.success("Revision deduced by 'auto': %s" % scm_data.revision) diff --git a/conans/client/source.py b/conans/client/source.py index 103141b04..8d28db9f4 100644 --- a/conans/client/source.py +++ b/conans/client/source.py @@ -198,6 +198,5 @@ def _fetch_scm(scm_data, dest_dir, local_sources_path, output): else: output.info("Getting sources from url: '%s'" % scm_data.url) scm = SCM(scm_data, dest_dir) - scm.clone() scm.checkout() _clean_source_folder(dest_dir) diff --git a/conans/client/tools/scm.py b/conans/client/tools/scm.py index a59f5a29c..132c7de7d 100644 --- a/conans/client/tools/scm.py +++ b/conans/client/tools/scm.py @@ -1,19 +1,25 @@ import os +import sys +import re + import subprocess -from six.moves.urllib.parse import urlparse, quote_plus +from six.moves.urllib.parse import urlparse, quote_plus, unquote from subprocess import CalledProcessError, PIPE, STDOUT 
+import platform from conans.client.tools.env import no_op, environment_append from conans.client.tools.files import chdir from conans.errors import ConanException +from conans.model.version import Version from conans.util.files import decode_text, to_file_bytes, walk -class Git(object): +class SCMBase(object): + cmd_command = None - def __init__(self, folder=None, verify_ssl=True, username=None, password=None, - force_english=True, runner=None): + def __init__(self, folder=None, verify_ssl=True, username=None, password=None, force_english=True, + runner=None): self.folder = folder or os.getcwd() if not os.path.exists(self.folder): os.makedirs(self.folder) @@ -24,17 +30,14 @@ class Git(object): self._runner = runner def run(self, command): - command = "git %s" % command + command = "%s %s" % (self.cmd_command, command) with chdir(self.folder) if self.folder else no_op(): with environment_append({"LC_ALL": "en_US.UTF-8"}) if self._force_eng else no_op(): if not self._runner: - return subprocess.check_output(command, shell=True).decode().strip() + return decode_text(subprocess.check_output(command, shell=True).strip()) else: return self._runner(command) - def get_repo_root(self): - return self.run("rev-parse --show-toplevel") - def get_url_with_credentials(self, url): if not self._username or not self._password: return url @@ -46,7 +49,12 @@ class Git(object): url = url.replace("://", "://" + user_enc + ":" + pwd_enc + "@", 1) return url + +class Git(SCMBase): + cmd_command = "git" + def _configure_ssl_verify(self): + # TODO: This should be a context manager return self.run("config http.sslVerify %s" % ("true" if self._verify_ssl else "false")) def clone(self, url, branch=None): @@ -106,20 +114,18 @@ class Git(object): def get_remote_url(self, remote_name=None): self._check_git_repo() remote_name = remote_name or "origin" - try: - remotes = self.run("remote -v") - for remote in remotes.splitlines(): - try: - name, url = remote.split(None, 1) - url, _ = 
url.rsplit(None, 1) - if name == remote_name: - return url - except Exception: - pass - except subprocess.CalledProcessError: - pass + remotes = self.run("remote -v") + for remote in remotes.splitlines(): + name, url = remote.split(None, 1) + if name == remote_name: + url, _ = url.rsplit(None, 1) + return url return None + def is_local_repository(self): + url = self.get_remote_url() + return os.path.exists(url) + def get_commit(self): self._check_git_repo() try: @@ -127,15 +133,21 @@ class Git(object): commit = commit.strip() return commit except Exception as e: - raise ConanException("Unable to get git commit from %s\n%s" % (self.folder, str(e))) + raise ConanException("Unable to get git commit from '%s': %s" % (self.folder, str(e))) get_revision = get_commit - def _check_git_repo(self): - try: - self.run("status") - except Exception: - raise ConanException("Not a valid git repository") + def is_pristine(self): + self._check_git_repo() + status = self.run("status --porcelain").strip() + if not status: + return True + else: + return False + + def get_repo_root(self): + self._check_git_repo() + return self.run("rev-parse --show-toplevel") def get_branch(self): self._check_git_repo() @@ -145,4 +157,136 @@ class Git(object): branch = status.splitlines()[0].split("...")[0].strip("#").strip() return branch except Exception as e: - raise ConanException("Unable to get git branch from %s\n%s" % (self.folder, str(e))) + raise ConanException("Unable to get git branch from %s: %s" % (self.folder, str(e))) + + def _check_git_repo(self): + try: + self.run("status") + except Exception: + raise ConanException("Not a valid git repository") + + +class SVN(SCMBase): + cmd_command = "svn" + file_protocol = 'file:///' if platform.system() == "Windows" else 'file://' + API_CHANGE_VERSION = Version("1.10") # CLI changes in 1.9.x + + def __init__(self, folder=None, runner=None, *args, **kwargs): + def runner_no_strip(command): + return decode_text(subprocess.check_output(command, 
shell=True)) + runner = runner or runner_no_strip + super(SVN, self).__init__(folder=folder, runner=runner, *args, **kwargs) + + @staticmethod + def get_version(): + try: + out, err = subprocess.Popen(["svn", "--version"], stdout=subprocess.PIPE).communicate() + version_line = decode_text(out).split('\n', 1)[0] + version_str = version_line.split(' ', 3)[2] + return Version(version_str) + except Exception as e: + raise ConanException("Error retrieving SVN version: '{}'".format(e)) + + @property + def version(self): + if not hasattr(self, '_version'): + version = SVN.get_version() + setattr(self, '_version', version) + return getattr(self, '_version') + + def run(self, command): + # Ensure we always pass some params + extra_options = " --no-auth-cache --non-interactive" + if not self._verify_ssl: + if self.version >= SVN.API_CHANGE_VERSION: + extra_options += " --trust-server-cert-failures=unknown-ca" + else: + extra_options += " --trust-server-cert" + return super(SVN, self).run(command="{} {}".format(command, extra_options)) + + def checkout(self, url, revision="HEAD"): + output = "" + try: + self._check_svn_repo() + except ConanException: + output += self.run('co "{url}" .'.format(url=url)) + else: + assert url.lower() == self.get_remote_url().lower(), \ + "%s != %s" % (url, self.get_remote_url()) + output += self.run("revert . 
--recursive") + finally: + output += self.update(revision=revision) + return output + + def update(self, revision='HEAD'): + self._check_svn_repo() + return self.run("update -r {rev}".format(rev=revision)) + + def excluded_files(self): + self._check_svn_repo() + excluded_list = [] + output = self.run("status --no-ignore") + for it in output.splitlines(): + if it[0] == 'I': # Only ignored files + filepath = it[8:].strip() + excluded_list.append(os.path.normpath(filepath)) + return excluded_list + + def get_remote_url(self): + return self.run("info --show-item url").strip() + + def get_qualified_remote_url(self): + # Return url with peg revision + url = self.get_remote_url() + revision = self.get_last_changed_revision() + return "{url}@{revision}".format(url=url, revision=revision) + + def is_local_repository(self): + url = self.get_remote_url() + return url.startswith(self.file_protocol) and \ + os.path.exists(unquote(url[len(self.file_protocol):])) + + def is_pristine(self): + # Check if working copy is pristine/consistent + output = self.run("status -u -r {}".format(self.get_revision())) + offending_columns = [0, 1, 2, 3, 4, 6, 7, 8] # 5th column informs if the file is locked (7th is always blank) + + for item in output.splitlines()[:-1]: + if item[0] == '?': # Untracked file + continue + if any(item[i] != ' ' for i in offending_columns): + return False + + return True + + def get_revision(self): + return self.run("info --show-item revision").strip() + + def get_repo_root(self): + return self.run("info --show-item wc-root").strip() + + def get_last_changed_revision(self, use_wc_root=True): + if use_wc_root: + return self.run('info "{root}" --show-item last-changed-revision'.format( + root=self.get_repo_root())).strip() + else: + return self.run("info --show-item last-changed-revision").strip() + + def get_branch(self): + url = self.run("info --show-item relative-url").strip() + try: + pattern = "(tags|branches)/[^/]+|trunk" + branch = re.search(pattern, url) + + 
if branch is None: + return None + else: + return branch.group(0) + except Exception as e: + raise ConanException("Unable to get svn branch from %s: %s" % (self.folder, str(e))) + + def _check_svn_repo(self): + try: + self.run("info") + except Exception: + raise ConanException("Not a valid SVN repository") \ No newline at end of file diff --git a/conans/model/scm.py b/conans/model/scm.py index 3e1231026..07ae934b7 100644 --- a/conans/model/scm.py +++ b/conans/model/scm.py @@ -1,6 +1,7 @@ import json +import sys -from conans.client.tools.scm import Git +from conans.client.tools.scm import Git, SVN from conans.errors import ConanException @@ -44,22 +45,25 @@ class SCM(object): self.repo = self._get_repo() def _get_repo(self): - repo = {"git": Git(self.repo_folder, verify_ssl=self._data.verify_ssl, - username=self._data.username, - password=self._data.password)}.get(self._data.type) - if not repo: + repo_class = {"git": Git, "svn": SVN}.get(self._data.type) + if not repo_class: raise ConanException("SCM not supported: %s" % self._data.type) - return repo + + return repo_class(folder=self.repo_folder, verify_ssl=self._data.verify_ssl, + username=self._data.username, password=self._data.password) @property def excluded_files(self): return self.repo.excluded_files() - def clone(self): - return self.repo.clone(self._data.url) - def checkout(self): - return self.repo.checkout(self._data.revision, submodule=self._data.submodule) + output= "" + if self._data.type == "git": + output += self.repo.clone(url=self._data.url) + output += self.repo.checkout(element=self._data.revision, submodule=self._data.submodule) + else: + output += self.repo.checkout(url=self._data.url, revision=self._data.revision) + return output def get_remote_url(self): return self.repo.get_remote_url() @@ -67,5 +71,18 @@ class SCM(object): def get_revision(self): return self.repo.get_revision() + def is_pristine(self): + return self.repo.is_pristine() + def get_repo_root(self): return 
self.repo.get_repo_root() + + def get_qualified_remote_url(self): + if self._data.type == "git": + return self.repo.get_remote_url() + else: + return self.repo.get_qualified_remote_url() + + def is_local_repository(self): + return self.repo.is_local_repository() +
[SCM] Support svn We currently use Subversion and want to make use of the source() method for retrieving the _exact_ source for the package i.e. we don’t want to use exports_sources due to it only being a snapshot and we can’t use the scm attribute. We also want our source co-located with the recipe. Therefore we would like something similar to the following in our repo: - conanfile.py - include - foo.h - lib - foo.cpp Within the recipe we would have something like: NOTE: pseudo code ``` def source(self): svn co url-to-check-out . def build(self) # make sure we have the latest source svn update # now build cmake = CMake(self) … ``` Question 1: This appears to be against recommended practice, i.e. exports_sources appears to be recommended for co-located source and recipe. So, is this a bad idea? If so, why? Question 2: For active development, we think we will need to do ‘svn update’ within the build() method due to the caching of source. Is this a bad idea? We actually agree with @liberforce and the issue described within #3084, in that it appears strange there is no way to override the caching of source as during active development the source will change often (tracking HEAD) but the recipe will not.
conan-io/conan
diff --git a/conans/test/functional/scm_test.py b/conans/test/functional/scm_test.py index 559223f72..cc73eb620 100644 --- a/conans/test/functional/scm_test.py +++ b/conans/test/functional/scm_test.py @@ -3,11 +3,11 @@ import os import unittest from collections import namedtuple -from conans.client.tools.scm import Git +from conans.client.tools.scm import Git, SVN from conans.model.ref import ConanFileReference, PackageReference from conans.model.scm import SCMData from conans.test.utils.test_files import temp_folder -from conans.test.utils.tools import TestClient, TestServer, create_local_git_repo +from conans.test.utils.tools import TestClient, TestServer, create_local_git_repo, SVNLocalRepoTestCase from conans.util.files import load, rmdir, save, to_file_bytes from conans.client.tools.win import get_cased_path @@ -21,7 +21,7 @@ class ConanLib(ConanFile): version = "0.1" short_paths = True scm = {{ - "type": "git", + "type": "%s", "url": "{url}", "revision": "{revision}", }} @@ -32,8 +32,11 @@ class ConanLib(ConanFile): self.output.warn(tools.load(path)) ''' +base_git = base % "git" +base_svn = base % "svn" + -class SCMTest(unittest.TestCase): +class GitSCMTest(unittest.TestCase): def setUp(self): self.reference = ConanFileReference.loads("lib/0.1@user/channel") @@ -88,7 +91,7 @@ class ConanLib(ConanFile): def test_auto_filesystem_remote_git(self): # https://github.com/conan-io/conan/issues/3109 - conanfile = base.format(directory="None", url="auto", revision="auto") + conanfile = base_git.format(directory="None", url="auto", revision="auto") repo = temp_folder() self.client.save({"conanfile.py": conanfile, "myfile.txt": "My file is copied"}, repo) self.client.runner("git init .", cwd=repo) @@ -105,7 +108,7 @@ class ConanLib(ConanFile): def test_auto_git(self): curdir = get_cased_path(self.client.current_folder).replace("\\", "/") - conanfile = base.format(directory="None", url="auto", revision="auto") + conanfile = base_git.format(directory="None", url="auto", 
revision="auto") self.client.save({"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) self._commit_contents() error = self.client.run("export . user/channel", ignore_error=True) @@ -132,7 +135,7 @@ class ConanLib(ConanFile): # Export again but now with absolute reference, so no pointer file is created nor kept git = Git(curdir) - self.client.save({"conanfile.py": base.format(url=curdir, revision=git.get_revision())}) + self.client.save({"conanfile.py": base_git.format(url=curdir, revision=git.get_revision())}) self.client.run("create . user/channel") sources_dir = self.client.client_cache.scm_folder(self.reference) self.assertFalse(os.path.exists(sources_dir)) @@ -143,9 +146,9 @@ class ConanLib(ConanFile): def test_auto_subfolder(self): curdir = self.client.current_folder.replace("\\", "/") - conanfile = base.replace('"revision": "{revision}"', - '"revision": "{revision}",\n ' - '"subfolder": "mysub"') + conanfile = base_git.replace('"revision": "{revision}"', + '"revision": "{revision}",\n ' + '"subfolder": "mysub"') conanfile = conanfile.replace("short_paths = True", "short_paths = False") conanfile = conanfile.format(directory="None", url="auto", revision="auto") self.client.save({"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) @@ -162,7 +165,7 @@ class ConanLib(ConanFile): Conanfile is not in the root of the repo: https://github.com/conan-io/conan/issues/3465 """ curdir = get_cased_path(self.client.current_folder).replace("\\", "/") - conanfile = base.format(url="auto", revision="auto") + conanfile = base_git.format(url="auto", revision="auto") self.client.save({"conan/conanfile.py": conanfile, "myfile.txt": "content of my file"}) self._commit_contents() self.client.runner('git remote add origin https://myrepo.com.git', cwd=curdir) @@ -175,7 +178,7 @@ class ConanLib(ConanFile): def test_deleted_source_folder(self): path, commit = create_local_git_repo({"myfile": "contents"}, branch="my_release") curdir = 
self.client.current_folder.replace("\\", "/") - conanfile = base.format(url="auto", revision="auto") + conanfile = base_git.format(url="auto", revision="auto") self.client.save({"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) self._commit_contents() self.client.runner('git remote add origin "%s"' % path.replace("\\", "/"), cwd=curdir) @@ -189,8 +192,8 @@ class ConanLib(ConanFile): self.assertTrue(error) self.assertIn("Getting sources from url: '%s'" % path.replace("\\", "/"), self.client.out) - def test_excluded_repo_files(self): - conanfile = base.format(url="auto", revision="auto") + def test_excluded_repo_fies(self): + conanfile = base_git.format(url="auto", revision="auto") conanfile = conanfile.replace("short_paths = True", "short_paths = False") path, commit = create_local_git_repo({"myfile": "contents", "ignored.pyc": "bin", @@ -222,7 +225,7 @@ other_folder/excluded_subfolder def test_local_source(self): curdir = self.client.current_folder - conanfile = base.format(url="auto", revision="auto") + conanfile = base_git.format(url="auto", revision="auto") conanfile += """ def source(self): self.output.warn("SOURCE METHOD CALLED") @@ -240,7 +243,7 @@ other_folder/excluded_subfolder # Export again but now with absolute reference, so no pointer file is created nor kept git = Git(curdir.replace("\\", "/")) - conanfile = base.format(url=curdir.replace("\\", "/"), revision=git.get_revision()) + conanfile = base_git.format(url=curdir.replace("\\", "/"), revision=git.get_revision()) conanfile += """ def source(self): self.output.warn("SOURCE METHOD CALLED") @@ -257,9 +260,9 @@ other_folder/excluded_subfolder def test_local_source_subfolder(self): curdir = self.client.current_folder - conanfile = base.replace('"revision": "{revision}"', - '"revision": "{revision}",\n ' - '"subfolder": "mysub"') + conanfile = base_git.replace('"revision": "{revision}"', + '"revision": "{revision}",\n ' + '"subfolder": "mysub"') conanfile = conanfile.format(url="auto", 
revision="auto") conanfile += """ def source(self): @@ -279,7 +282,7 @@ other_folder/excluded_subfolder self.client = TestClient(servers=self.servers, users={"myremote": [("lasote", "mypass")]}) curdir = self.client.current_folder.replace("\\", "/") - conanfile = base.format(url="auto", revision="auto") + conanfile = base_git.format(url="auto", revision="auto") self.client.save({"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) self._commit_contents() cmd = 'git remote add origin "%s"' % curdir @@ -293,7 +296,6 @@ other_folder/excluded_subfolder self.assertIn("My file is copied", client2.out) def test_source_removed_in_local_cache(self): - conanfile = ''' from conans import ConanFile, tools @@ -303,11 +305,11 @@ class ConanLib(ConanFile): "url": "auto", "revision": "auto", } - + def build(self): contents = tools.load("myfile") self.output.warn("Contents: %s" % contents) - + ''' path, commit = create_local_git_repo({"myfile": "contents", "conanfile.py": conanfile}, branch="my_release") @@ -330,10 +332,10 @@ class ConanLib(ConanFile): def _relative_paths(folder): submodule_path = os.path.join( - folder, + folder, os.path.basename(os.path.normpath(submodule))) subsubmodule_path = os.path.join( - submodule_path, + submodule_path, os.path.basename(os.path.normpath(subsubmodule))) return submodule_path, subsubmodule_path @@ -473,3 +475,296 @@ class ConanLib(ConanFile): the_json = str(scm_data) data2 = json.loads(the_json) self.assertEquals(data, data2) + + +class SVNSCMTest(SVNLocalRepoTestCase): + + def setUp(self): + self.reference = ConanFileReference.loads("lib/0.1@user/channel") + self.client = TestClient() + + def _commit_contents(self): + # self.client.runner('svn co "{url}" "{path}"'.format(url=self.repo_url, path=self.client.current_folder)) + self.client.runner("svn add *", cwd=self.client.current_folder) + self.client.runner('svn commit -m "commiting"', cwd=self.client.current_folder) + + def test_scm_other_type_ignored(self): + conanfile = ''' 
+from conans import ConanFile, tools + +class ConanLib(ConanFile): + name = "lib" + version = "0.1" + scm = ["Other stuff"] + +''' + self.client.save({"conanfile.py": conanfile}) + # nothing breaks + self.client.run("export . user/channel") + + def test_repeat_clone_changing_subfolder(self): + tmp = ''' +from conans import ConanFile, tools + +class ConanLib(ConanFile): + name = "lib" + version = "0.1" + scm = {{ + "type": "svn", + "url": "{url}", + "revision": "{revision}", + "subfolder": "onesubfolder" + }} +''' + project_url, rev = self.create_project(files={"myfile": "contents"}) + conanfile = tmp.format(url=project_url, revision=rev) + self.client.save({"conanfile.py": conanfile, + "myfile.txt": "My file is copied"}) + self.client.run("create . user/channel") + conanfile = conanfile.replace('"onesubfolder"', '"othersubfolder"') + self.client.save({"conanfile.py": conanfile}) + self.client.run("create . user/channel") + folder = self.client.client_cache.source(ConanFileReference.loads("lib/0.1@user/channel")) + self.assertIn("othersubfolder", os.listdir(folder)) + self.assertTrue(os.path.exists(os.path.join(folder, "othersubfolder", "myfile"))) + + def test_auto_filesystem_remote_svn(self): + # SVN origin will never be a local path (local repo has at least protocol file:///) + pass + + def test_auto_svn(self): + conanfile = base_svn.format(directory="None", url="auto", revision="auto") + project_url, rev = self.create_project(files={"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) + project_url = project_url.replace(" ", "%20") + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, path=self.client.current_folder)) + + curdir = self.client.current_folder.replace("\\", "/") + # Create the package, will copy the sources from the local folder + self.client.run("create . 
user/channel") + sources_dir = self.client.client_cache.scm_folder(self.reference) + self.assertEquals(load(sources_dir), curdir) + self.assertIn("Repo origin deduced by 'auto': {}".format(project_url).lower(), + str(self.client.out).lower()) + self.assertIn("Revision deduced by 'auto'", self.client.out) + self.assertIn("Getting sources from folder: %s" % curdir, self.client.out) + self.assertIn("My file is copied", self.client.out) + + # Export again but now with absolute reference, so no pointer file is created nor kept + svn = SVN(curdir) + self.client.save({"conanfile.py": base_svn.format(url=svn.get_remote_url(), revision=svn.get_revision())}) + self.client.run("create . user/channel", ignore_error=False) + sources_dir = self.client.client_cache.scm_folder(self.reference) + self.assertFalse(os.path.exists(sources_dir)) + self.assertNotIn("Repo origin deduced by 'auto'", self.client.out) + self.assertNotIn("Revision deduced by 'auto'", self.client.out) + self.assertIn("Getting sources from url: '{}'".format(project_url).lower(), + str(self.client.out).lower()) + self.assertIn("My file is copied", self.client.out) + + def test_auto_subfolder(self): + conanfile = base_svn.replace('"revision": "{revision}"', + '"revision": "{revision}",\n ' + '"subfolder": "mysub"') + conanfile = conanfile.replace("short_paths = True", "short_paths = False") + conanfile = conanfile.format(directory="None", url="auto", revision="auto") + + project_url, rev = self.create_project(files={"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) + project_url = project_url.replace(" ", "%20") + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, path=self.client.current_folder)) + self.client.run("create . 
user/channel") + + folder = self.client.client_cache.source(ConanFileReference.loads("lib/0.1@user/channel")) + self.assertTrue(os.path.exists(os.path.join(folder, "mysub", "myfile.txt"))) + self.assertFalse(os.path.exists(os.path.join(folder, "mysub", "conanfile.py"))) + + def test_auto_conanfile_no_root(self): + """ + Conanfile is not in the root of the repo: https://github.com/conan-io/conan/issues/3465 + """ + curdir = self.client.current_folder + conanfile = base_svn.format(url="auto", revision="auto") + project_url, rev = self.create_project(files={"conan/conanfile.py": conanfile, + "myfile.txt": "My file is copied"}) + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, + path=self.client.current_folder)) + self.client.run("create conan/ user/channel") + + sources_dir = self.client.client_cache.scm_folder(self.reference) + self.assertEquals(load(sources_dir), curdir.replace('\\', '/')) # Root of git is 'curdir' + + def test_deleted_source_folder(self): + # SVN will always retrieve from 'remote' + pass + + def test_excluded_repo_fies(self): + conanfile = base_svn.format(url="auto", revision="auto") + conanfile = conanfile.replace("short_paths = True", "short_paths = False") + project_url, rev = self.create_project(files={"myfile": "contents", + "ignored.pyc": "bin", # SVN ignores pyc files by default: http://blogs.collab.net/subversion/repository-dictated-configuration-day-3-global-ignores + # ".gitignore": "*.pyc\n", + "myfile.txt": "My file!", + "conanfile.py": conanfile}) + project_url = project_url.replace(" ", "%20") + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, path=self.client.current_folder)) + + self.client.run("create . 
user/channel") + self.assertIn("Copying sources to build folder", self.client.out) + pref = PackageReference(ConanFileReference.loads("lib/0.1/user/channel"), + "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9") + bf = self.client.client_cache.build(pref) + self.assertTrue(os.path.exists(os.path.join(bf, "myfile.txt"))) + self.assertTrue(os.path.exists(os.path.join(bf, "myfile"))) + self.assertTrue(os.path.exists(os.path.join(bf, ".svn"))) + self.assertFalse(os.path.exists(os.path.join(bf, "ignored.pyc"))) + + def test_local_source(self): + curdir = self.client.current_folder + conanfile = base_svn.format(url="auto", revision="auto") + conanfile += """ + def source(self): + self.output.warn("SOURCE METHOD CALLED") +""" + project_url, rev = self.create_project(files={"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) + project_url = project_url.replace(" ", "%20") + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, path=self.client.current_folder)) + self.client.save({"aditional_file.txt": "contents"}) + + self.client.run("source . --source-folder=./source") + self.assertTrue(os.path.exists(os.path.join(curdir, "source", "myfile.txt"))) + self.assertIn("SOURCE METHOD CALLED", self.client.out) + # Even the not commited files are copied + self.assertTrue(os.path.exists(os.path.join(curdir, "source", "aditional_file.txt"))) + self.assertIn("Getting sources from folder: %s" % curdir, self.client.out) + + # Export again but now with absolute reference, so no pointer file is created nor kept + svn = SVN(curdir.replace("\\", "/")) + conanfile = base_svn.format(url=svn.get_remote_url(), revision=svn.get_revision()) + conanfile += """ + def source(self): + self.output.warn("SOURCE METHOD CALLED") +""" + self.client.save({"conanfile.py": conanfile, + "myfile2.txt": "My file is copied"}) + self._commit_contents() + self.client.run("source . 
--source-folder=./source2") + # myfile2 is no in the specified commit + self.assertFalse(os.path.exists(os.path.join(curdir, "source2", "myfile2.txt"))) + self.assertTrue(os.path.exists(os.path.join(curdir, "source2", "myfile.txt"))) + self.assertIn("Getting sources from url: '{}'".format(project_url).lower(), + str(self.client.out).lower()) + self.assertIn("SOURCE METHOD CALLED", self.client.out) + + def test_local_source_subfolder(self): + curdir = self.client.current_folder + conanfile = base_svn.replace('"revision": "{revision}"', + '"revision": "{revision}",\n ' + '"subfolder": "mysub"') + conanfile = conanfile.format(url="auto", revision="auto") + conanfile += """ + def source(self): + self.output.warn("SOURCE METHOD CALLED") +""" + project_url, rev = self.create_project(files={"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) + project_url = project_url.replace(" ", "%20") + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, path=self.client.current_folder)) + + self.client.run("source . --source-folder=./source") + self.assertFalse(os.path.exists(os.path.join(curdir, "source", "myfile.txt"))) + self.assertTrue(os.path.exists(os.path.join(curdir, "source", "mysub", "myfile.txt"))) + self.assertIn("SOURCE METHOD CALLED", self.client.out) + + def test_install_checked_out(self): + test_server = TestServer() + self.servers = {"myremote": test_server} + self.client = TestClient(servers=self.servers, users={"myremote": [("lasote", "mypass")]}) + + curdir = self.client.current_folder.replace("\\", "/") + conanfile = base_svn.format(url="auto", revision="auto") + project_url, rev = self.create_project(files={"conanfile.py": conanfile, "myfile.txt": "My file is copied"}) + project_url = project_url.replace(" ", "%20") + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, path=self.client.current_folder)) + self.client.run("export . 
lasote/channel") + self.client.run("upload lib* -c") + + # Take other client, the old client folder will be used as a remote + client2 = TestClient(servers=self.servers, users={"myremote": [("lasote", "mypass")]}) + client2.run("install lib/0.1@lasote/channel --build") + self.assertIn("My file is copied", client2.out) + + def test_source_removed_in_local_cache(self): + conanfile = ''' +from conans import ConanFile, tools + +class ConanLib(ConanFile): + scm = { + "type": "svn", + "url": "auto", + "revision": "auto", + } + + def build(self): + contents = tools.load("myfile") + self.output.warn("Contents: %s" % contents) + +''' + project_url, rev = self.create_project(files={"myfile": "contents", "conanfile.py": conanfile}) + project_url = project_url.replace(" ", "%20") + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, path=self.client.current_folder)) + + self.client.run("create . lib/1.0@user/channel") + self.assertIn("Contents: contents", self.client.out) + self.client.save({"myfile": "Contents 2"}) + self.client.run("create . lib/1.0@user/channel") + self.assertIn("Contents: Contents 2", self.client.out) + self.assertIn("Detected 'scm' auto in conanfile, trying to remove source folder", + self.client.out) + + def test_submodule(self): + # SVN has no submodules, may add something related to svn:external? 
+ pass + + def test_source_method_export_sources_and_scm_mixed(self): + project_url, rev = self.create_project(files={"myfile": "contents"}) + project_url = project_url.replace(" ", "%20") + self.client.runner('svn co "{url}" "{path}"'.format(url=project_url, path=self.client.current_folder)) + + conanfile = ''' +import os +from conans import ConanFile, tools + +class ConanLib(ConanFile): + name = "lib" + version = "0.1" + exports_sources = "file.txt" + scm = {{ + "type": "svn", + "url": "{url}", + "revision": "{rev}", + "subfolder": "src" + }} + + def source(self): + self.output.warn("SOURCE METHOD CALLED") + assert(os.path.exists("file.txt")) + assert(os.path.exists(os.path.join("src", "myfile"))) + tools.save("cosa.txt", "contents") + + def build(self): + assert(os.path.exists("file.txt")) + assert(os.path.exists("cosa.txt")) + self.output.warn("BUILD METHOD CALLED") +'''.format(url=project_url, rev=rev) + self.client.save({"conanfile.py": conanfile, "file.txt": "My file is copied"}) + self.client.run("create . 
user/channel") + self.assertIn("SOURCE METHOD CALLED", self.client.out) + self.assertIn("BUILD METHOD CALLED", self.client.out) + + def test_scm_serialization(self): + data = {"url": "myurl", "revision": "23", "username": "myusername", + "password": "mypassword", "type": "svn", "verify_ssl": True, + "subfolder": "mysubfolder"} + conanfile = namedtuple("ConanfileMock", "scm")(data) + scm_data = SCMData(conanfile) + the_json = str(scm_data) + data2 = json.loads(the_json) + self.assertEquals(data, data2) diff --git a/conans/test/util/tools_test.py b/conans/test/util/tools_test.py index 825d8467f..7a41d25f4 100644 --- a/conans/test/util/tools_test.py +++ b/conans/test/util/tools_test.py @@ -4,12 +4,14 @@ import mock import os import platform import unittest +import uuid from collections import namedtuple import six from mock.mock import patch, mock_open from six import StringIO +from six.moves.urllib.parse import unquote from conans.client.client_cache import CONAN_CONF @@ -18,7 +20,7 @@ from conans.client.conan_api import ConanAPIV1 from conans.client.conf import default_settings_yml, default_client_conf from conans.client.output import ConanOutput from conans.client.tools.win import vcvars_dict, vswhere -from conans.client.tools.scm import Git +from conans.client.tools.scm import Git, SVN from conans.errors import ConanException, NotFoundException from conans.model.build_info import CppInfo @@ -28,7 +30,7 @@ from conans.test.build_helpers.cmake_test import ConanFileMock from conans.test.utils.runner import TestRunner from conans.test.utils.test_files import temp_folder from conans.test.utils.tools import TestClient, TestBufferConanOutput, create_local_git_repo, \ - StoppableThreadBottle + SVNLocalRepoTestCase, StoppableThreadBottle from conans.tools import which from conans.tools import OSInfo, SystemPackageTool, replace_in_file, AptTool, ChocolateyTool,\ @@ -1197,6 +1199,75 @@ ProgramFiles(x86)=C:\Program Files (x86) self.assertTrue(os.path.exists("test_folder")) 
thread.stop() + def unix_to_dos_unit_test(self): + + def save_file(contents): + tmp = temp_folder() + filepath = os.path.join(tmp, "a_file.txt") + save(filepath, contents) + return filepath + + fp = save_file(b"a line\notherline\n") + if not tools.os_info.is_windows: + import subprocess + output = subprocess.check_output(["file", fp], stderr=subprocess.STDOUT) + self.assertIn("ASCII text", str(output)) + self.assertNotIn("CRLF", str(output)) + + tools.unix2dos(fp) + output = subprocess.check_output(["file", fp], stderr=subprocess.STDOUT) + self.assertIn("ASCII text", str(output)) + self.assertIn("CRLF", str(output)) + else: + fc = tools.load(fp) + self.assertNotIn("\r\n", fc) + tools.unix2dos(fp) + fc = tools.load(fp) + self.assertIn("\r\n", fc) + + self.assertEquals("a line\r\notherline\r\n", str(tools.load(fp))) + + fp = save_file(b"a line\r\notherline\r\n") + if not tools.os_info.is_windows: + import subprocess + output = subprocess.check_output(["file", fp], stderr=subprocess.STDOUT) + self.assertIn("ASCII text", str(output)) + self.assertIn("CRLF", str(output)) + + tools.dos2unix(fp) + output = subprocess.check_output(["file", fp], stderr=subprocess.STDOUT) + self.assertIn("ASCII text", str(output)) + self.assertNotIn("CRLF", str(output)) + else: + fc = tools.load(fp) + self.assertIn("\r\n", fc) + tools.dos2unix(fp) + fc = tools.load(fp) + self.assertNotIn("\r\n", fc) + + self.assertEquals("a line\notherline\n", str(tools.load(fp))) + + def unix_to_dos_conanfile_test(self): + client = TestClient() + conanfile = """ +import os +from conans import ConanFile, tools + +class HelloConan(ConanFile): + name = "Hello" + version = "0.1" + exports_sources = "file.txt" + + def build(self): + assert("\\r\\n" in tools.load("file.txt")) + tools.dos2unix("file.txt") + assert("\\r\\n" not in tools.load("file.txt")) + tools.unix2dos("file.txt") + assert("\\r\\n" in tools.load("file.txt")) +""" + client.save({"conanfile.py": conanfile, "file.txt": "hello\r\n"}) + 
client.run("create . user/channel") + class GitToolTest(unittest.TestCase): @@ -1213,6 +1284,29 @@ class GitToolTest(unittest.TestCase): git = Git(subfolder) self.assertEqual(root_path, git.get_repo_root()) + def test_is_pristine(self): + root_path, _ = create_local_git_repo({"myfile": "anything"}) + + git = Git(root_path) + self.assertTrue(git.is_pristine()) + + save(os.path.join(root_path, "other_file"), "content") + self.assertFalse(git.is_pristine()) + + git.run("add .") + self.assertFalse(git.is_pristine()) + + git.run('commit -m "commit"') + self.assertTrue(git.is_pristine()) + + def test_is_local_repository(self): + root_path, _ = create_local_git_repo({"myfile": "anything"}) + + git = Git(temp_folder()) + git.clone(root_path) + self.assertTrue(git.is_local_repository()) + # TODO: Check that with remote one it is working too + def test_clone_git(self): path, _ = create_local_git_repo({"myfile": "contents"}) tmp = temp_folder() @@ -1415,75 +1509,324 @@ class HelloConan(ConanFile): client.run("create . 
user/channel", ignore_error=True) self.assertIn("specify a branch to checkout", client.out) - def unix_to_dos_unit_test(self): - - def save_file(contents): - tmp = temp_folder() - filepath = os.path.join(tmp, "a_file.txt") - save(filepath, contents) - return filepath - - fp = save_file(b"a line\notherline\n") - if not tools.os_info.is_windows: - import subprocess - output = subprocess.check_output(["file", fp], stderr=subprocess.STDOUT) - self.assertIn("ASCII text", str(output)) - self.assertNotIn("CRLF", str(output)) - - tools.unix2dos(fp) - output = subprocess.check_output(["file", fp], stderr=subprocess.STDOUT) - self.assertIn("ASCII text", str(output)) - self.assertIn("CRLF", str(output)) - else: - fc = tools.load(fp) - self.assertNotIn("\r\n", fc) - tools.unix2dos(fp) - fc = tools.load(fp) - self.assertIn("\r\n", fc) - self.assertEquals("a line\r\notherline\r\n", str(tools.load(fp))) +class SVNToolTestsBasic(SVNLocalRepoTestCase): + + def test_clone(self): + project_url, _ = self.create_project(files={'myfile': "contents"}) + tmp_folder = self.gimme_tmp() + svn = SVN(folder=tmp_folder) + svn.checkout(url=project_url) + self.assertTrue(os.path.exists(os.path.join(tmp_folder, 'myfile'))) + + def test_revision_number(self): + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url=self.repo_url) + rev = int(svn.get_revision()) + self.create_project(files={'another_file': "content"}) + svn.run("update") + rev2 = int(svn.get_revision()) + self.assertEqual(rev2, rev + 1) + + def test_repo_url(self): + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url=self.repo_url) + remote_url = svn.get_remote_url() + self.assertEqual(remote_url.lower(), self.repo_url.lower()) + + svn2 = SVN(folder=self.gimme_tmp(create=False)) + svn2.checkout(url=remote_url) # clone using quoted url + self.assertEqual(svn2.get_remote_url().lower(), self.repo_url.lower()) + + def test_repo_project_url(self): + project_url, _ = self.create_project(files={"myfile": "content"}) + svn = 
SVN(folder=self.gimme_tmp()) + svn.checkout(url=project_url) + self.assertEqual(svn.get_remote_url().lower(), project_url.lower()) + + def test_checkout(self): + # Ensure we have several revisions in the repository + self.create_project(files={'file': "content"}) + self.create_project(files={'file': "content"}) + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url=self.repo_url) + rev = int(svn.get_revision()) + svn.update(revision=rev - 1) # Checkout previous revision + self.assertTrue(int(svn.get_revision()), rev-1) + + def test_clone_over_dirty_directory(self): + project_url, _ = self.create_project(files={'myfile': "contents"}) + tmp_folder = self.gimme_tmp() + svn = SVN(folder=tmp_folder) + svn.checkout(url=project_url) + + new_file = os.path.join(tmp_folder, "new_file") + with open(new_file, "w") as f: + f.write("content") + + mod_file = os.path.join(tmp_folder, "myfile") + with open(mod_file, "a") as f: + f.write("new content") + + self.assertFalse(svn.is_pristine()) + svn.checkout(url=project_url) # SVN::clone over a dirty repo reverts all changes (but it doesn't delete non versioned files) + self.assertTrue(svn.is_pristine()) + # self.assertFalse(os.path.exists(new_file)) + + def test_excluded_files(self): + project_url, _ = self.create_project(files={'myfile': "contents"}) + tmp_folder = self.gimme_tmp() + svn = SVN(folder=tmp_folder) + svn.checkout(url=project_url) + + # Add untracked file + new_file = os.path.join(tmp_folder, str(uuid.uuid4())) + with open(new_file, "w") as f: + f.write("content") + + # Add ignore file + file_to_ignore = str(uuid.uuid4()) + with open(os.path.join(tmp_folder, file_to_ignore), "w") as f: + f.write("content") + svn.run("propset svn:ignore {} .".format(file_to_ignore)) + svn.run('commit -m "add ignored file"') + + excluded_files = svn.excluded_files() + self.assertIn(file_to_ignore, excluded_files) + self.assertNotIn('.svn', excluded_files) + self.assertEqual(len(excluded_files), 1) - fp = save_file(b"a 
line\r\notherline\r\n") - if not tools.os_info.is_windows: - import subprocess - output = subprocess.check_output(["file", fp], stderr=subprocess.STDOUT) - self.assertIn("ASCII text", str(output)) - self.assertIn("CRLF", str(output)) + def test_credentials(self): + svn = SVN(folder=self.gimme_tmp(), username="ada", password="lovelace") + url_credentials = svn.get_url_with_credentials("https://some.url.com") + self.assertEquals(url_credentials, "https://ada:[email protected]") - tools.dos2unix(fp) - output = subprocess.check_output(["file", fp], stderr=subprocess.STDOUT) - self.assertIn("ASCII text", str(output)) - self.assertNotIn("CRLF", str(output)) + def test_verify_ssl(self): + class MyRunner(object): + def __init__(self, svn): + self.calls = [] + self._runner = svn._runner + svn._runner = self + + def __call__(self, command, *args, **kwargs): + self.calls.append(command) + return self._runner(command, *args, **kwargs) + + project_url, _ = self.create_project(files={'myfile': "contents", + 'subdir/otherfile': "content"}) + + svn = SVN(folder=self.gimme_tmp(), username="peter", password="otool", verify_ssl=True) + runner = MyRunner(svn) + svn.checkout(url=project_url) + self.assertNotIn("--trust-server-cert-failures=unknown-ca", runner.calls[1]) + + svn = SVN(folder=self.gimme_tmp(), username="peter", password="otool", verify_ssl=False) + runner = MyRunner(svn) + svn.checkout(url=project_url) + if SVN.get_version() >= SVN.API_CHANGE_VERSION: + self.assertIn("--trust-server-cert-failures=unknown-ca", runner.calls[1]) else: - fc = tools.load(fp) - self.assertIn("\r\n", fc) - tools.dos2unix(fp) - fc = tools.load(fp) - self.assertNotIn("\r\n", fc) + self.assertIn("--trust-server-cert", runner.calls[1]) - self.assertEquals("a line\notherline\n", str(tools.load(fp))) - - def unix_to_dos_conanfile_test(self): - client = TestClient() - conanfile = """ + def test_repo_root(self): + project_url, _ = self.create_project(files={'myfile': "contents", + 'subdir/otherfile': 
"content"}) + tmp_folder = self.gimme_tmp() + svn = SVN(folder=tmp_folder) + svn.checkout(url=project_url) + + path = os.path.realpath(tmp_folder).replace('\\', '/').lower() + self.assertEqual(path, svn.get_repo_root().lower()) + + # SVN instantiated in a subfolder + svn2 = SVN(folder=os.path.join(tmp_folder, 'subdir')) + self.assertFalse(svn2.folder == tmp_folder) + path = os.path.realpath(tmp_folder).replace('\\', '/').lower() + self.assertEqual(path, svn2.get_repo_root().lower()) + + def test_is_local_repository(self): + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url=self.repo_url) + self.assertTrue(svn.is_local_repository()) + + # TODO: Test not local repository + + def test_last_changed_revision(self): + project_url, _ = self.create_project(files={'project1/myfile': "contents", + 'project2/myfile': "content", + 'project2/subdir1/myfile': "content", + 'project2/subdir2/myfile': "content", + }) + prj1 = SVN(folder=self.gimme_tmp()) + prj1.checkout(url='/'.join([project_url, 'project1'])) + + prj2 = SVN(folder=self.gimme_tmp()) + prj2.checkout(url='/'.join([project_url, 'project2'])) + + self.assertEqual(prj1.get_last_changed_revision(), prj2.get_last_changed_revision()) + + # Modify file in one subfolder of prj2 + with open(os.path.join(prj2.folder, "subdir1", "myfile"), "a") as f: + f.write("new content") + prj2.run('commit -m "add to file"') + prj2.run('update') + prj1.run('update') + + self.assertNotEqual(prj1.get_last_changed_revision(), prj2.get_last_changed_revision()) + self.assertEqual(prj1.get_revision(), prj2.get_revision()) + + # Instantiate a SVN in the other subfolder + prj2_subdir2 = SVN(folder=os.path.join(prj2.folder, "subdir2")) + prj2_subdir2.run('update') + self.assertEqual(prj2.get_last_changed_revision(), + prj2_subdir2.get_last_changed_revision()) + self.assertNotEqual(prj2.get_last_changed_revision(use_wc_root=False), + prj2_subdir2.get_last_changed_revision(use_wc_root=False)) + + def test_branch(self): + project_url, _ = 
self.create_project(files={'prj1/trunk/myfile': "contents", + 'prj1/branches/my_feature/myfile': "", + 'prj1/branches/issue3434/myfile': "", + 'prj1/tags/v12.3.4/myfile': "", + }) + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url='/'.join([project_url, 'prj1', 'trunk'])) + self.assertEqual("trunk", svn.get_branch()) + + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url='/'.join([project_url, 'prj1', 'branches', 'my_feature'])) + self.assertEqual("branches/my_feature", svn.get_branch()) + + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url='/'.join([project_url, 'prj1', 'branches', 'issue3434'])) + self.assertEqual("branches/issue3434", svn.get_branch()) + + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url='/'.join([project_url, 'prj1', 'tags', 'v12.3.4'])) + self.assertEqual("tags/v12.3.4", svn.get_branch()) + + +class SVNToolTestsPristine(SVNLocalRepoTestCase): + + def test_checkout(self): + svn = SVN(folder=self.gimme_tmp()) + svn.checkout(url=self.repo_url) + self.assertTrue(svn.is_pristine()) + + def test_checkout_project(self): + project_url, _ = self.create_project(files={'myfile': "contents"}) + + tmp_folder = self.gimme_tmp() + svn = SVN(folder=tmp_folder) + svn.checkout(url=project_url) + self.assertTrue(svn.is_pristine()) + + def test_modified_file(self): + project_url, _ = self.create_project(files={'myfile': "contents"}) + tmp_folder = self.gimme_tmp() + svn = SVN(folder=tmp_folder) + svn.checkout(url=project_url) + with open(os.path.join(tmp_folder, "myfile"), "a") as f: + f.write("new content") + self.assertFalse(svn.is_pristine()) + + def test_untracked_file(self): + self.create_project(files={'myfile': "contents"}) + tmp_folder = self.gimme_tmp() + svn = SVN(folder=tmp_folder) + svn.checkout(url=self.repo_url) + with open(os.path.join(tmp_folder, "not_tracked.txt"), "w") as f: + f.write("content") + self.assertTrue(svn.is_pristine()) + + def test_ignored_file(self): + tmp_folder = self.gimme_tmp() + svn = 
SVN(folder=self.gimme_tmp()) + svn.checkout(url=self.repo_url) + file_to_ignore = "secret.txt" + with open(os.path.join(tmp_folder, file_to_ignore), "w") as f: + f.write("content") + svn.run("propset svn:ignore {} .".format(file_to_ignore)) + self.assertFalse(svn.is_pristine()) # Folder properties have been modified + svn.run('commit -m "add ignored file"') + self.assertTrue(svn.is_pristine()) + + def test_conflicted_file(self): + project_url, _ = self.create_project(files={'myfile': "contents"}) + + def work_on_project(tmp_folder): + svn = SVN(folder=tmp_folder) + svn.checkout(url=project_url) + self.assertTrue(svn.is_pristine()) + with open(os.path.join(tmp_folder, "myfile"), "a") as f: + f.write("random content: {}".format(uuid.uuid4())) + return svn + + # Two users working on the same project + svn1 = work_on_project(self.gimme_tmp()) + svn2 = work_on_project(self.gimme_tmp()) + + # User1 is faster + svn1.run('commit -m "user1 commit"') + self.assertFalse(svn1.is_pristine()) + svn1.run('update') # Yes, we need to update local copy in order to have the same revision everywhere. + self.assertTrue(svn1.is_pristine()) + + # User2 updates and get a conflicted file + svn2.run('update') + self.assertFalse(svn2.is_pristine()) + svn2.run('revert . 
-R') + self.assertTrue(svn2.is_pristine()) + + +class SVNToolsTestsRecipe(SVNLocalRepoTestCase): + + conanfile = """ import os from conans import ConanFile, tools class HelloConan(ConanFile): name = "Hello" version = "0.1" - exports_sources = "file.txt" + exports_sources = "other" + + def source(self): + svn = tools.SVN({svn_folder}) + svn.checkout(url="{svn_url}") def build(self): - assert("\\r\\n" in tools.load("file.txt")) - tools.dos2unix("file.txt") - assert("\\r\\n" not in tools.load("file.txt")) - tools.unix2dos("file.txt") - assert("\\r\\n" in tools.load("file.txt")) + assert(os.path.exists("{file_path}")) + assert(os.path.exists("other")) """ - client.save({"conanfile.py": conanfile, "file.txt": "hello\r\n"}) + + def test_clone_root_folder(self): + tmp_folder = self.gimme_tmp() + client = TestClient() + client.runner('svn co "{}" "{}"'.format(self.repo_url, tmp_folder)) + save(os.path.join(tmp_folder, "file.h"), "contents") + client.runner("svn add file.h", cwd=tmp_folder) + client.runner('svn commit -m "message"', cwd=tmp_folder) + + conanfile = self.conanfile.format(svn_folder="", svn_url=self.repo_url, + file_path="file.h") + client.save({"conanfile.py": conanfile, "other": "hello"}) client.run("create . user/channel") + def test_clone_subfolder(self): + tmp_folder = self.gimme_tmp() + client = TestClient() + client.runner('svn co "{}" "{}"'.format(self.repo_url, tmp_folder)) + save(os.path.join(tmp_folder, "file.h"), "contents") + client.runner("svn add file.h", cwd=tmp_folder) + client.runner('svn commit -m "message"', cwd=tmp_folder) + + conanfile = self.conanfile.format(svn_folder="\"src\"", svn_url=self.repo_url, + file_path="src/file.h") + client.save({"conanfile.py": conanfile, "other": "hello"}) + client.run("create . 
user/channel") + + +class CollectLibTestCase(unittest.TestCase): def collect_libs_test(self): conanfile = ConanFileMock() # Without package_folder @@ -1546,3 +1889,4 @@ class HelloConan(ConanFile): self.assertEqual(["mylib"], result) self.assertIn("WARN: Lib folder doesn't exist, can't collect libraries: %s" % no_folder_path, conanfile.output) + diff --git a/conans/test/utils/tools.py b/conans/test/utils/tools.py index df7b77c08..629a6bee5 100644 --- a/conans/test/utils/tools.py +++ b/conans/test/utils/tools.py @@ -5,16 +5,22 @@ import shutil import sys import threading import uuid +import errno +import stat from collections import Counter from contextlib import contextmanager from io import StringIO +import subprocess +import unittest +import tempfile +import platform import bottle import requests import six import time from mock import Mock -from six.moves.urllib.parse import urlsplit, urlunsplit +from six.moves.urllib.parse import urlsplit, urlunsplit, quote from webtest.app import TestApp from conans import __version__ as CLIENT_VERSION, tools @@ -28,8 +34,9 @@ from conans.client.plugin_manager import PluginManager from conans.client.remote_registry import RemoteRegistry from conans.client.rest.conan_requester import ConanRequester from conans.client.rest.uploader_downloader import IterableToFileAdapter -from conans.client.tools.scm import Git +from conans.client.tools.scm import Git, SVN from conans.client.userio import UserIO +from conans.client.tools.files import chdir from conans.model.version import Version from conans.test.server.utils.server_launcher import (TESTING_REMOTE_PRIVATE_USER, TESTING_REMOTE_PRIVATE_PASS, @@ -45,6 +52,7 @@ from conans.model.manifest import FileTreeManifest from conans.client.tools.win import get_cased_path + def inc_recipe_manifest_timestamp(client_cache, conan_ref, inc_time): conan_ref = ConanFileReference.loads(str(conan_ref)) path = client_cache.export(conan_ref) @@ -306,6 +314,60 @@ def create_local_git_repo(files=None, 
branch=None, submodules=None, folder=None) return tmp.replace("\\", "/"), git.get_revision() +def handleRemoveReadonly(func, path, exc): # TODO: May promote to conan tools? + # Credit: https://stackoverflow.com/questions/1213706/what-user-do-python-scripts-run-as-in-windows + excvalue = exc[1] + if func in (os.rmdir, os.remove, os.unlink) and excvalue.errno == errno.EACCES: + os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777 + func(path) + else: + raise + + +class SVNLocalRepoTestCase(unittest.TestCase): + path_with_spaces = True + + def _create_local_svn_repo(self): + repo_url = os.path.join(self._tmp_folder, 'repo_server') + subprocess.check_output('svnadmin create "{}"'.format(repo_url), shell=True) + return SVN.file_protocol + quote(repo_url.replace("\\", "/"), safe='/:') + + def gimme_tmp(self, create=True): + tmp = os.path.join(self._tmp_folder, str(uuid.uuid4())) + if create: + os.makedirs(tmp) + return tmp + + def create_project(self, files, rel_project_path=None, commit_msg='default commit message', delete_checkout=True): + tmp_dir = self.gimme_tmp() + try: + rel_project_path = rel_project_path or str(uuid.uuid4()) + # Do not use SVN class as it is what we will be testing + subprocess.check_output('svn co "{url}" "{path}"'.format(url=self.repo_url, path=tmp_dir), shell=True) + tmp_project_dir = os.path.join(tmp_dir, rel_project_path) + os.makedirs(tmp_project_dir) + save_files(tmp_project_dir, files) + with chdir(tmp_project_dir): + subprocess.check_output("svn add .", shell=True) + subprocess.check_output('svn commit -m "{}"'.format(commit_msg), shell=True) + rev = subprocess.check_output("svn info --show-item revision", shell=True).decode().strip() + project_url = self.repo_url + "/" + quote(rel_project_path.replace("\\", "/")) + return project_url, rev + finally: + if delete_checkout: + shutil.rmtree(tmp_dir, ignore_errors=False, onerror=handleRemoveReadonly) + + def run(self, *args, **kwargs): + tmp_folder = 
tempfile.mkdtemp(suffix='_conans') + try: + self._tmp_folder = os.path.join(tmp_folder, 'path with spaces' if self.path_with_spaces else 'pathwithoutspaces') + os.makedirs(self._tmp_folder) + self.repo_url = self._create_local_svn_repo() + super(SVNLocalRepoTestCase, self).run(*args, **kwargs) + finally: + shutil.rmtree(tmp_folder, ignore_errors=False, onerror=handleRemoveReadonly) + + class MockedUserIO(UserIO): """
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 5 }
1.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "nose-cov", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc pkg-config" ], "python": "3.6", "reqs_path": [ "conans/requirements.txt", "conans/requirements_server.txt", "conans/requirements_dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==2.11.7 attrs==22.2.0 beautifulsoup4==4.12.3 bottle==0.12.25 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 colorama==0.3.9 -e git+https://github.com/conan-io/conan.git@0e1a608aa997760d0012027f4168e0017d63e62f#egg=conan cov-core==1.15.0 coverage==4.2 deprecation==2.0.7 dill==0.3.4 distro==1.1.0 fasteners==0.19 future==0.16.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 mock==1.3.0 node-semver==0.2.0 nose==1.3.7 nose-cov==1.6 packaging==21.3 parameterized==0.8.1 patch==1.16 pbr==6.1.1 platformdirs==2.4.0 pluggy==1.0.0 pluginbase==0.7 py==1.11.0 Pygments==2.14.0 PyJWT==1.7.1 pylint==2.13.9 pyparsing==3.1.4 pytest==7.0.1 PyYAML==3.13 requests==2.27.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 waitress==2.0.0 WebOb==1.8.9 WebTest==2.0.35 wrapt==1.16.0 zipp==3.6.0
name: conan channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==2.11.7 - attrs==22.2.0 - beautifulsoup4==4.12.3 - bottle==0.12.25 - charset-normalizer==2.0.12 - codecov==2.1.13 - colorama==0.3.9 - cov-core==1.15.0 - coverage==4.2 - deprecation==2.0.7 - dill==0.3.4 - distro==1.1.0 - fasteners==0.19 - future==0.16.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - mock==1.3.0 - node-semver==0.2.0 - nose==1.3.7 - nose-cov==1.6 - packaging==21.3 - parameterized==0.8.1 - patch==1.16 - pbr==6.1.1 - platformdirs==2.4.0 - pluggy==1.0.0 - pluginbase==0.7 - py==1.11.0 - pygments==2.14.0 - pyjwt==1.7.1 - pylint==2.13.9 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==3.13 - requests==2.27.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - waitress==2.0.0 - webob==1.8.9 - webtest==2.0.35 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/conan
[ "conans/test/functional/scm_test.py::GitSCMTest::test_scm_other_type_ignored", "conans/test/functional/scm_test.py::SVNSCMTest::test_auto_filesystem_remote_svn", "conans/test/functional/scm_test.py::SVNSCMTest::test_deleted_source_folder", "conans/test/functional/scm_test.py::SVNSCMTest::test_scm_other_type_ignored", "conans/test/functional/scm_test.py::SVNSCMTest::test_scm_serialization", "conans/test/functional/scm_test.py::SVNSCMTest::test_submodule", "conans/test/util/tools_test.py::ReplaceInFileTest::test_replace_in_file", "conans/test/util/tools_test.py::ToolsTest::test_environment_nested", "conans/test/util/tools_test.py::GitToolTest::test_clone_existing_folder_git", "conans/test/util/tools_test.py::GitToolTest::test_clone_existing_folder_without_branch", "conans/test/util/tools_test.py::GitToolTest::test_clone_git", "conans/test/util/tools_test.py::GitToolTest::test_credentials", "conans/test/util/tools_test.py::GitToolTest::test_is_local_repository", "conans/test/util/tools_test.py::GitToolTest::test_is_pristine", "conans/test/util/tools_test.py::GitToolTest::test_repo_root", "conans/test/util/tools_test.py::GitToolTest::test_verify_ssl", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_branch", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_checkout", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_clone", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_clone_over_dirty_directory", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_credentials", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_excluded_files", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_is_local_repository", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_last_changed_revision", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_repo_project_url", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_repo_root", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_repo_url", 
"conans/test/util/tools_test.py::SVNToolTestsBasic::test_revision_number", "conans/test/util/tools_test.py::SVNToolTestsBasic::test_verify_ssl", "conans/test/util/tools_test.py::SVNToolTestsPristine::test_checkout", "conans/test/util/tools_test.py::SVNToolTestsPristine::test_checkout_project", "conans/test/util/tools_test.py::SVNToolTestsPristine::test_conflicted_file", "conans/test/util/tools_test.py::SVNToolTestsPristine::test_ignored_file", "conans/test/util/tools_test.py::SVNToolTestsPristine::test_modified_file", "conans/test/util/tools_test.py::SVNToolTestsPristine::test_untracked_file" ]
[ "conans/test/functional/scm_test.py::GitSCMTest::test_auto_conanfile_no_root", "conans/test/functional/scm_test.py::GitSCMTest::test_auto_filesystem_remote_git", "conans/test/functional/scm_test.py::GitSCMTest::test_auto_git", "conans/test/functional/scm_test.py::GitSCMTest::test_auto_subfolder", "conans/test/functional/scm_test.py::GitSCMTest::test_deleted_source_folder", "conans/test/functional/scm_test.py::GitSCMTest::test_excluded_repo_fies", "conans/test/functional/scm_test.py::GitSCMTest::test_install_checked_out", "conans/test/functional/scm_test.py::GitSCMTest::test_local_source", "conans/test/functional/scm_test.py::GitSCMTest::test_local_source_subfolder", "conans/test/functional/scm_test.py::GitSCMTest::test_repeat_clone_changing_subfolder", "conans/test/functional/scm_test.py::GitSCMTest::test_scm_bad_filename", "conans/test/functional/scm_test.py::GitSCMTest::test_source_method_export_sources_and_scm_mixed", "conans/test/functional/scm_test.py::GitSCMTest::test_source_removed_in_local_cache", "conans/test/functional/scm_test.py::GitSCMTest::test_submodule", "conans/test/functional/scm_test.py::SVNSCMTest::test_auto_conanfile_no_root", "conans/test/functional/scm_test.py::SVNSCMTest::test_auto_subfolder", "conans/test/functional/scm_test.py::SVNSCMTest::test_auto_svn", "conans/test/functional/scm_test.py::SVNSCMTest::test_excluded_repo_fies", "conans/test/functional/scm_test.py::SVNSCMTest::test_install_checked_out", "conans/test/functional/scm_test.py::SVNSCMTest::test_local_source", "conans/test/functional/scm_test.py::SVNSCMTest::test_local_source_subfolder", "conans/test/functional/scm_test.py::SVNSCMTest::test_repeat_clone_changing_subfolder", "conans/test/functional/scm_test.py::SVNSCMTest::test_source_method_export_sources_and_scm_mixed", "conans/test/functional/scm_test.py::SVNSCMTest::test_source_removed_in_local_cache", "conans/test/util/tools_test.py::ToolsTest::test_get_env_in_conanfile", 
"conans/test/util/tools_test.py::ToolsTest::test_global_tools_overrided", "conans/test/util/tools_test.py::GitToolTest::test_clone_submodule_git", "conans/test/util/tools_test.py::SVNToolsTestsRecipe::test_clone_root_folder", "conans/test/util/tools_test.py::SVNToolsTestsRecipe::test_clone_subfolder" ]
[]
[]
MIT License
2,760
[ "conans/client/tools/scm.py", "conans/client/source.py", "conans/client/cmd/export.py", ".gitignore", "conans/model/scm.py" ]
[ "conans/client/tools/scm.py", "conans/client/source.py", "conans/client/cmd/export.py", ".gitignore", "conans/model/scm.py" ]
python-cmd2__cmd2-465
6ddb6842e5ac87fb5c433eb8d86df48f3e045da2
2018-07-11 02:54:33
60a212c1c585f0c4c06ffcfeb9882520af8dbf35
diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e22ee79..764d0061 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,7 @@ -## 0.9.3 (TBD, 2018) +## 0.9.3 (July TBD, 2018) +* Bug Fixes + * Fixed bug when StatementParser ``__init__()`` was called with ``terminators`` equal to ``None`` + * Fixed bug when ``Cmd.onecmd()`` was called with a raw ``str`` ## 0.9.2 (June 28, 2018) * Bug Fixes diff --git a/cmd2/cmd2.py b/cmd2/cmd2.py index 42e00c39..44f3a068 100644 --- a/cmd2/cmd2.py +++ b/cmd2/cmd2.py @@ -1901,14 +1901,19 @@ class Cmd(cmd.Cmd): result = target return result - def onecmd(self, statement: Statement) -> Optional[bool]: + def onecmd(self, statement: Union[Statement, str]) -> Optional[bool]: """ This executes the actual do_* method for a command. If the command provided doesn't exist, then it executes _default() instead. - :param statement: Command - a parsed command from the input stream + :param statement: Command - intended to be a Statement instance parsed command from the input stream, + alternative acceptance of a str is present only for backward compatibility with cmd :return: a flag indicating whether the interpretation of commands should stop """ + # For backwards compatibility with cmd, allow a str to be passed in + if not isinstance(statement, Statement): + statement = self._complete_statement(statement) + funcname = self._func_named(statement.command) if not funcname: self.default(statement)
onecmd should accept a raw string I have commands that I want to run without the hooks as strings, but trying to run `onecmd` directly fails because `command` and `args` to be in the namespace. This is inconsistent with the `cmd` standard library module.
python-cmd2/cmd2
diff --git a/tests/test_cmd2.py b/tests/test_cmd2.py index 77dcc875..b973fdf5 100644 --- a/tests/test_cmd2.py +++ b/tests/test_cmd2.py @@ -1787,3 +1787,18 @@ def test_readline_remove_history_item(base_app): assert readline.get_current_history_length() == 1 readline.remove_history_item(0) assert readline.get_current_history_length() == 0 + +def test_onecmd_raw_str_continue(base_app): + line = "help" + stop = base_app.onecmd(line) + out = base_app.stdout.buffer + assert not stop + assert out.strip() == BASE_HELP.strip() + +def test_onecmd_raw_str_quit(base_app): + line = "quit" + stop = base_app.onecmd(line) + out = base_app.stdout.buffer + assert stop + assert out == '' +
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 2 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock", "tox", "pylint", "sphinx", "sphinx-rtd-theme", "sphinx-autobuild", "invoke", "twine" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 anyio==4.9.0 astroid==3.3.9 babel==2.17.0 backports.tarfile==1.2.0 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 click==8.1.8 -e git+https://github.com/python-cmd2/cmd2.git@6ddb6842e5ac87fb5c433eb8d86df48f3e045da2#egg=cmd2 colorama==0.4.6 coverage==7.8.0 cryptography==44.0.2 dill==0.3.9 distlib==0.3.9 docutils==0.21.2 exceptiongroup==1.2.2 filelock==3.18.0 h11==0.14.0 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 invoke==2.2.0 isort==6.0.1 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 mccabe==0.7.0 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pycparser==2.22 Pygments==2.19.1 pylint==3.3.6 pyperclip==1.9.0 pyproject-api==1.9.0 pytest==8.3.5 pytest-cov==6.0.0 pytest-mock==3.14.0 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 sniffio==1.3.1 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinx-autobuild==2024.10.3 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 starlette==0.46.1 tomli==2.2.1 tomlkit==0.13.2 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 uvicorn==0.34.0 virtualenv==20.29.3 watchfiles==1.0.4 wcwidth==0.2.13 websockets==15.0.1 zipp==3.21.0
name: cmd2 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - anyio==4.9.0 - astroid==3.3.9 - babel==2.17.0 - backports-tarfile==1.2.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - click==8.1.8 - colorama==0.4.6 - coverage==7.8.0 - cryptography==44.0.2 - dill==0.3.9 - distlib==0.3.9 - docutils==0.21.2 - exceptiongroup==1.2.2 - filelock==3.18.0 - h11==0.14.0 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - invoke==2.2.0 - isort==6.0.1 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mccabe==0.7.0 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pycparser==2.22 - pygments==2.19.1 - pylint==3.3.6 - pyperclip==1.9.0 - pyproject-api==1.9.0 - pytest==8.3.5 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - sniffio==1.3.1 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinx-autobuild==2024.10.3 - sphinx-rtd-theme==3.0.2 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - 
sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - starlette==0.46.1 - tomli==2.2.1 - tomlkit==0.13.2 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - uvicorn==0.34.0 - virtualenv==20.29.3 - watchfiles==1.0.4 - wcwidth==0.2.13 - websockets==15.0.1 - zipp==3.21.0 prefix: /opt/conda/envs/cmd2
[ "tests/test_cmd2.py::test_onecmd_raw_str_continue", "tests/test_cmd2.py::test_onecmd_raw_str_quit" ]
[ "tests/test_cmd2.py::test_base_invalid_option", "tests/test_cmd2.py::test_which_editor_good" ]
[ "tests/test_cmd2.py::test_ver", "tests/test_cmd2.py::test_empty_statement", "tests/test_cmd2.py::test_base_help", "tests/test_cmd2.py::test_base_help_verbose", "tests/test_cmd2.py::test_base_help_history", "tests/test_cmd2.py::test_base_argparse_help", "tests/test_cmd2.py::test_base_shortcuts", "tests/test_cmd2.py::test_base_show", "tests/test_cmd2.py::test_base_show_long", "tests/test_cmd2.py::test_base_show_readonly", "tests/test_cmd2.py::test_cast", "tests/test_cmd2.py::test_cast_problems", "tests/test_cmd2.py::test_base_set", "tests/test_cmd2.py::test_set_not_supported", "tests/test_cmd2.py::test_set_quiet", "tests/test_cmd2.py::test_base_shell", "tests/test_cmd2.py::test_base_py", "tests/test_cmd2.py::test_base_run_python_script", "tests/test_cmd2.py::test_base_run_pyscript", "tests/test_cmd2.py::test_recursive_pyscript_not_allowed", "tests/test_cmd2.py::test_pyscript_with_nonexist_file", "tests/test_cmd2.py::test_pyscript_with_exception", "tests/test_cmd2.py::test_pyscript_requires_an_argument", "tests/test_cmd2.py::test_base_error", "tests/test_cmd2.py::test_history_span", "tests/test_cmd2.py::test_history_get", "tests/test_cmd2.py::test_base_history", "tests/test_cmd2.py::test_history_script_format", "tests/test_cmd2.py::test_history_with_string_argument", "tests/test_cmd2.py::test_history_with_integer_argument", "tests/test_cmd2.py::test_history_with_integer_span", "tests/test_cmd2.py::test_history_with_span_start", "tests/test_cmd2.py::test_history_with_span_end", "tests/test_cmd2.py::test_history_with_span_index_error", "tests/test_cmd2.py::test_history_output_file", "tests/test_cmd2.py::test_history_edit", "tests/test_cmd2.py::test_history_run_all_commands", "tests/test_cmd2.py::test_history_run_one_command", "tests/test_cmd2.py::test_base_load", "tests/test_cmd2.py::test_load_with_empty_args", "tests/test_cmd2.py::test_load_with_nonexistent_file", "tests/test_cmd2.py::test_load_with_empty_file", "tests/test_cmd2.py::test_load_with_binary_file", 
"tests/test_cmd2.py::test_load_with_utf8_file", "tests/test_cmd2.py::test_load_nested_loads", "tests/test_cmd2.py::test_base_runcmds_plus_hooks", "tests/test_cmd2.py::test_base_relative_load", "tests/test_cmd2.py::test_relative_load_requires_an_argument", "tests/test_cmd2.py::test_output_redirection", "tests/test_cmd2.py::test_output_redirection_to_nonexistent_directory", "tests/test_cmd2.py::test_output_redirection_to_too_long_filename", "tests/test_cmd2.py::test_feedback_to_output_true", "tests/test_cmd2.py::test_feedback_to_output_false", "tests/test_cmd2.py::test_allow_redirection", "tests/test_cmd2.py::test_pipe_to_shell", "tests/test_cmd2.py::test_pipe_to_shell_error", "tests/test_cmd2.py::test_base_timing", "tests/test_cmd2.py::test_base_debug", "tests/test_cmd2.py::test_base_colorize", "tests/test_cmd2.py::test_edit_no_editor", "tests/test_cmd2.py::test_edit_file", "tests/test_cmd2.py::test_edit_file_with_spaces", "tests/test_cmd2.py::test_edit_blank", "tests/test_cmd2.py::test_base_py_interactive", "tests/test_cmd2.py::test_exclude_from_history", "tests/test_cmd2.py::test_base_cmdloop_with_queue", "tests/test_cmd2.py::test_base_cmdloop_without_queue", "tests/test_cmd2.py::test_cmdloop_without_rawinput", "tests/test_cmd2.py::test_precmd_hook_success", "tests/test_cmd2.py::test_precmd_hook_failure", "tests/test_cmd2.py::test_interrupt_quit", "tests/test_cmd2.py::test_interrupt_noquit", "tests/test_cmd2.py::test_default_to_shell_unknown", "tests/test_cmd2.py::test_default_to_shell_good", "tests/test_cmd2.py::test_default_to_shell_failure", "tests/test_cmd2.py::test_ansi_prompt_not_esacped", "tests/test_cmd2.py::test_ansi_prompt_escaped", "tests/test_cmd2.py::test_custom_command_help", "tests/test_cmd2.py::test_custom_help_menu", "tests/test_cmd2.py::test_help_undocumented", "tests/test_cmd2.py::test_help_overridden_method", "tests/test_cmd2.py::test_help_cat_base", "tests/test_cmd2.py::test_help_cat_verbose", "tests/test_cmd2.py::test_select_options", 
"tests/test_cmd2.py::test_select_invalid_option", "tests/test_cmd2.py::test_select_list_of_strings", "tests/test_cmd2.py::test_select_list_of_tuples", "tests/test_cmd2.py::test_select_uneven_list_of_tuples", "tests/test_cmd2.py::test_help_with_no_docstring", "tests/test_cmd2.py::test_which_editor_bad", "tests/test_cmd2.py::test_multiline_complete_empty_statement_raises_exception", "tests/test_cmd2.py::test_multiline_complete_statement_without_terminator", "tests/test_cmd2.py::test_clipboard_failure", "tests/test_cmd2.py::test_commandresult_truthy", "tests/test_cmd2.py::test_commandresult_falsy", "tests/test_cmd2.py::test_is_text_file_bad_input", "tests/test_cmd2.py::test_eof", "tests/test_cmd2.py::test_eos", "tests/test_cmd2.py::test_echo", "tests/test_cmd2.py::test_pseudo_raw_input_tty_rawinput_true", "tests/test_cmd2.py::test_pseudo_raw_input_tty_rawinput_false", "tests/test_cmd2.py::test_pseudo_raw_input_piped_rawinput_true_echo_true", "tests/test_cmd2.py::test_pseudo_raw_input_piped_rawinput_true_echo_false", "tests/test_cmd2.py::test_pseudo_raw_input_piped_rawinput_false_echo_true", "tests/test_cmd2.py::test_pseudo_raw_input_piped_rawinput_false_echo_false", "tests/test_cmd2.py::test_raw_input", "tests/test_cmd2.py::test_stdin_input", "tests/test_cmd2.py::test_empty_stdin_input", "tests/test_cmd2.py::test_poutput_string", "tests/test_cmd2.py::test_poutput_zero", "tests/test_cmd2.py::test_poutput_empty_string", "tests/test_cmd2.py::test_poutput_none", "tests/test_cmd2.py::test_alias", "tests/test_cmd2.py::test_alias_lookup_invalid_alias", "tests/test_cmd2.py::test_unalias", "tests/test_cmd2.py::test_unalias_all", "tests/test_cmd2.py::test_unalias_non_existing", "tests/test_cmd2.py::test_create_invalid_alias[\">\"]", "tests/test_cmd2.py::test_create_invalid_alias[\"no>pe\"]", "tests/test_cmd2.py::test_create_invalid_alias[\"no", "tests/test_cmd2.py::test_create_invalid_alias[\"nopipe|\"]", "tests/test_cmd2.py::test_create_invalid_alias[\"noterm;\"]", 
"tests/test_cmd2.py::test_create_invalid_alias[noembedded\"quotes]", "tests/test_cmd2.py::test_ppaged", "tests/test_cmd2.py::test_parseline_empty", "tests/test_cmd2.py::test_parseline", "tests/test_cmd2.py::test_readline_remove_history_item" ]
[]
MIT License
2,761
[ "cmd2/cmd2.py", "CHANGELOG.md" ]
[ "cmd2/cmd2.py", "CHANGELOG.md" ]
valohai__valohai-cli-33
fbdad62d3e4177586622e18a53c30fe4f081416a
2018-07-11 15:20:44
b909441d803e87ff45f51d34e40f3aed396bd1a8
codecov[bot]: # [Codecov](https://codecov.io/gh/valohai/valohai-cli/pull/33?src=pr&el=h1) Report > Merging [#33](https://codecov.io/gh/valohai/valohai-cli/pull/33?src=pr&el=desc) into [master](https://codecov.io/gh/valohai/valohai-cli/commit/b909441d803e87ff45f51d34e40f3aed396bd1a8?src=pr&el=desc) will **increase** coverage by `0.09%`. > The diff coverage is `91.8%`. [![Impacted file tree graph](https://codecov.io/gh/valohai/valohai-cli/pull/33/graphs/tree.svg?src=pr&token=xqgKRx94XH&width=650&height=150)](https://codecov.io/gh/valohai/valohai-cli/pull/33?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #33 +/- ## ========================================== + Coverage 89.39% 89.49% +0.09% ========================================== Files 79 81 +2 Lines 2056 2113 +57 Branches 262 271 +9 ========================================== + Hits 1838 1891 +53 - Misses 131 133 +2 - Partials 87 89 +2 ``` | [Impacted Files](https://codecov.io/gh/valohai/valohai-cli/pull/33?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [valohai\_cli/utils/cli\_utils.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dmFsb2hhaV9jbGkvdXRpbHMvY2xpX3V0aWxzLnB5) | `70.58% <ø> (ø)` | | | [valohai\_cli/utils/\_\_init\_\_.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dmFsb2hhaV9jbGkvdXRpbHMvX19pbml0X18ucHk=) | `81.31% <ø> (ø)` | | | [valohai\_cli/commands/execution/run.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dmFsb2hhaV9jbGkvY29tbWFuZHMvZXhlY3V0aW9uL3J1bi5weQ==) | `81.51% <100%> (+2.87%)` | :arrow_up: | | [valohai\_cli/commands/project/link.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dmFsb2hhaV9jbGkvY29tbWFuZHMvcHJvamVjdC9saW5rLnB5) | `91.48% <100%> (ø)` | :arrow_up: | | [valohai\_cli/yaml\_wizard.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dmFsb2hhaV9jbGkveWFtbF93aXphcmQucHk=) | `95.23% <100%> (ø)` | :arrow_up: | | 
[tests/commands/execution/test\_run.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dGVzdHMvY29tbWFuZHMvZXhlY3V0aW9uL3Rlc3RfcnVuLnB5) | `100% <100%> (ø)` | :arrow_up: | | [valohai\_cli/git.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dmFsb2hhaV9jbGkvZ2l0LnB5) | `88.23% <50%> (+0.73%)` | :arrow_up: | | [valohai\_cli/utils/levenshtein.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dmFsb2hhaV9jbGkvdXRpbHMvbGV2ZW5zaHRlaW4ucHk=) | `80% <80%> (ø)` | | | [valohai\_cli/utils/friendly\_option\_parser.py](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree#diff-dmFsb2hhaV9jbGkvdXRpbHMvZnJpZW5kbHlfb3B0aW9uX3BhcnNlci5weQ==) | `90% <90%> (ø)` | | | ... and [1 more](https://codecov.io/gh/valohai/valohai-cli/pull/33/diff?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/valohai/valohai-cli/pull/33?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/valohai/valohai-cli/pull/33?src=pr&el=footer). Last update [b909441...8a10837](https://codecov.io/gh/valohai/valohai-cli/pull/33?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/valohai_cli/commands/execution/run.py b/valohai_cli/commands/execution/run.py index d266ee9..83527ec 100644 --- a/valohai_cli/commands/execution/run.py +++ b/valohai_cli/commands/execution/run.py @@ -1,3 +1,5 @@ +import re + import click from click.exceptions import BadParameter from click.globals import get_current_context @@ -9,10 +11,29 @@ import valohai_cli.git as git # this import style required for tests from valohai_cli.adhoc import create_adhoc_commit from valohai_cli.api import request from valohai_cli.ctx import get_project -from valohai_cli.messages import success, warn, error +from valohai_cli.utils.friendly_option_parser import FriendlyOptionParser +from valohai_cli.messages import success, warn from valohai_cli.utils import humanize_identifier, match_prefix +def sanitize_name(name): + return re.sub(r'[_ ]', '-', name) + + +def generate_sanitized_options(name): + seen = set() + for choice in ( + '--%s' % name, + '--%s' % sanitize_name(name), + ('--%s' % sanitize_name(name)).lower(), + ): + if ' ' in choice: + continue + if choice not in seen: + seen.add(choice) + yield choice + + class RunCommand(click.Command): """ A dynamically-generated subcommand that has Click options for parameters and inputs. 
@@ -47,7 +68,7 @@ class RunCommand(click.Command): self.image = image self.watch = bool(watch) super(RunCommand, self).__init__( - name=step.name.lower().replace(' ', '-'), + name=sanitize_name(step.name.lower()), callback=self.execute, add_help_option=True, ) @@ -65,9 +86,7 @@ class RunCommand(click.Command): """ assert isinstance(parameter, Parameter) option = click.Option( - param_decls=[ - '--%s' % parameter.name.replace('_', '-'), - ], + param_decls=list(generate_sanitized_options(parameter.name)), required=(parameter.default is None and not parameter.optional), default=parameter.default, help=parameter.description, @@ -85,9 +104,7 @@ class RunCommand(click.Command): """ assert isinstance(input, Input) option = click.Option( - param_decls=[ - '--%s' % input.name.replace('_', '-'), - ], + param_decls=list(generate_sanitized_options(input.name)), required=(input.default is None and not input.optional), default=input.default, metavar='URL', @@ -154,6 +171,14 @@ class RunCommand(click.Command): return commit + def make_parser(self, ctx): + parser = super(RunCommand, self).make_parser(ctx) + # This is somewhat naughty, but allows us to easily hook into here. + # Besides, FriendlyOptionParser does inherit from OptionParser anyway, + # and just overrides that one piece of behavior... 
+ parser.__class__ = FriendlyOptionParser + return parser + @click.command(context_settings=dict(ignore_unknown_options=True), add_help_option=False) @click.argument('step') diff --git a/valohai_cli/commands/project/link.py b/valohai_cli/commands/project/link.py index 430c986..f699516 100644 --- a/valohai_cli/commands/project/link.py +++ b/valohai_cli/commands/project/link.py @@ -1,7 +1,7 @@ import click from valohai_cli.api import request -from valohai_cli.cli_utils import prompt_from_list +from valohai_cli.utils.cli_utils import prompt_from_list from valohai_cli.commands.project.create import create_project from valohai_cli.consts import yes_option from valohai_cli.ctx import get_project, set_project_link diff --git a/valohai_cli/git.py b/valohai_cli/git.py index 47e4664..95c7776 100644 --- a/valohai_cli/git.py +++ b/valohai_cli/git.py @@ -1,3 +1,4 @@ +import os import subprocess from valohai_cli.exceptions import NoGitRepo @@ -10,9 +11,10 @@ def check_git_output(args, directory): cwd=directory, shell=False, stderr=subprocess.STDOUT, + env=dict(os.environ, LC_ALL='C'), ) except subprocess.CalledProcessError as cpe: - if cpe.returncode == 128 and 'Not a git repository' in cpe.output.decode(): + if cpe.returncode == 128 and 'not a git repository' in cpe.output.decode().lower(): raise NoGitRepo(directory) raise diff --git a/valohai_cli/utils.py b/valohai_cli/utils/__init__.py similarity index 100% rename from valohai_cli/utils.py rename to valohai_cli/utils/__init__.py diff --git a/valohai_cli/cli_utils.py b/valohai_cli/utils/cli_utils.py similarity index 100% rename from valohai_cli/cli_utils.py rename to valohai_cli/utils/cli_utils.py diff --git a/valohai_cli/utils/friendly_option_parser.py b/valohai_cli/utils/friendly_option_parser.py new file mode 100644 index 0000000..0c8c134 --- /dev/null +++ b/valohai_cli/utils/friendly_option_parser.py @@ -0,0 +1,26 @@ +from click import OptionParser, NoSuchOption + +from .levenshtein import levenshtein + + +class 
FriendlyOptionParser(OptionParser): + """ + A friendlier version of OptionParser that uses Levenshtein distances to figure out + if the user has just misspelled an option name. + """ + def _match_long_opt(self, opt, explicit_value, state): + try: + return super(FriendlyOptionParser, self)._match_long_opt(opt, explicit_value, state) + except NoSuchOption as nse: + if not nse.possibilities: + # No possibilities were guessed, so attempt some deeper magic + nse.possibilities = [ + word + for word + in self._long_opt + if levenshtein( + word.lower().lstrip('-'), + nse.option_name.lower().lstrip('-'), + ) <= 4 + ] + raise diff --git a/valohai_cli/utils/levenshtein.py b/valohai_cli/utils/levenshtein.py new file mode 100644 index 0000000..940b974 --- /dev/null +++ b/valohai_cli/utils/levenshtein.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +# From http://hetland.org/coding/python/levenshtein.py, which is in the public domain. + + +def levenshtein(a, b): + "Calculates the Levenshtein distance between a and b." + n, m = len(a), len(b) + if n > m: + # Make sure n <= m, to use O(min(n,m)) space + a, b = b, a + n, m = m, n + + current = range(n + 1) + for i in range(1, m + 1): + previous, current = current, [i] + [0] * n + for j in range(1, n + 1): + add, delete = previous[j] + 1, current[j - 1] + 1 + change = previous[j - 1] + if a[j - 1] != b[i - 1]: + change = change + 1 + current[j] = min(add, delete, change) + + return current[n] diff --git a/valohai_cli/yaml_wizard.py b/valohai_cli/yaml_wizard.py index b18d36b..a00fdf3 100644 --- a/valohai_cli/yaml_wizard.py +++ b/valohai_cli/yaml_wizard.py @@ -5,7 +5,7 @@ import click import requests import yaml -from valohai_cli.cli_utils import prompt_from_list +from valohai_cli.utils.cli_utils import prompt_from_list from valohai_cli.messages import error, success, warn from valohai_cli.utils import find_scripts
Step parameters with spaces don't work Passing step parameters with spaces does't work, it does use such default parameters fine though. Maybe we should require a separate `id` attribute in params for this? Or should we just slugify the name? Or enforce more strict naming for parameters? Example below. =========== ``` (valohai-cli) bog:darknet-example ruksi$ vh exec run --step="Generate text" --'Textual Seed'=1 Usage: vh execution run [OPTIONS] [ARGS]... Error: no such option: --Textual Seed ```
valohai/valohai-cli
diff --git a/tests/commands/execution/test_run.py b/tests/commands/execution/test_run.py index 2b90142..fc78001 100644 --- a/tests/commands/execution/test_run.py +++ b/tests/commands/execution/test_run.py @@ -117,3 +117,35 @@ def test_run_no_git(runner, logged_in_and_linked): with RunAPIMock(project_id, None, {}): output = runner.invoke(run, args, catch_exceptions=False).output assert 'is not a Git repository' in output + + +def test_param_input_sanitization(runner, logged_in_and_linked): + with open(get_project().get_config_filename(), 'w') as yaml_fp: + yaml_fp.write(''' +- step: + name: Train model + image: busybox + command: "false" + inputs: + - name: Ridiculously Complex Input_Name + default: http://example.com/ + parameters: + - name: Parameter With Highly Convoluted Name + pass-as: --simple={v} + type: integer + default: 1 +''') + output = runner.invoke(run, ['train', '--help'], catch_exceptions=False).output + assert '--Parameter-With-Highly-Convoluted-Name' in output + assert '--parameter-with-highly-convoluted-name' in output + assert '--Ridiculously-Complex-Input-Name' in output + assert '--ridiculously-complex-input-name' in output + + +def test_typo_check(runner, logged_in_and_linked): + with open(get_project().get_config_filename(), 'w') as yaml_fp: + yaml_fp.write(CONFIG_YAML) + args = ['train', '--max-setps=80'] # Oopsy! + output = runner.invoke(run, args, catch_exceptions=False).output + assert '(Possible options:' in output + assert '--max-steps' in output
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 4 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "requests-mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5.2", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 coverage==6.2 distlib==0.3.9 filelock==3.4.1 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jsonschema==3.2.0 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 PyYAML==6.0.1 requests==2.27.1 requests-mock==1.12.1 requests-toolbelt==1.0.0 six==1.17.0 toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 urllib3==1.26.20 -e git+https://github.com/valohai/valohai-cli.git@fbdad62d3e4177586622e18a53c30fe4f081416a#egg=valohai_cli valohai-yaml==0.25.2 virtualenv==20.17.1 zipp==3.6.0
name: valohai-cli channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - click==8.0.4 - coverage==6.2 - distlib==0.3.9 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jsonschema==3.2.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - pyyaml==6.0.1 - requests==2.27.1 - requests-mock==1.12.1 - requests-toolbelt==1.0.0 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - valohai-yaml==0.25.2 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/valohai-cli
[ "tests/commands/execution/test_run.py::test_run_no_git", "tests/commands/execution/test_run.py::test_param_input_sanitization", "tests/commands/execution/test_run.py::test_typo_check" ]
[ "tests/commands/execution/test_run.py::test_param_type_validation" ]
[ "tests/commands/execution/test_run.py::test_run_requires_step", "tests/commands/execution/test_run.py::test_run[regular-False-False-False]", "tests/commands/execution/test_run.py::test_run[regular-False-False-True]", "tests/commands/execution/test_run.py::test_run[regular-False-True-False]", "tests/commands/execution/test_run.py::test_run[regular-False-True-True]", "tests/commands/execution/test_run.py::test_run[regular-True-False-False]", "tests/commands/execution/test_run.py::test_run[regular-True-False-True]", "tests/commands/execution/test_run.py::test_run[regular-True-True-False]", "tests/commands/execution/test_run.py::test_run[regular-True-True-True]", "tests/commands/execution/test_run.py::test_run[adhoc-False-False-False]", "tests/commands/execution/test_run.py::test_run[adhoc-False-False-True]", "tests/commands/execution/test_run.py::test_run[adhoc-False-True-False]", "tests/commands/execution/test_run.py::test_run[adhoc-False-True-True]", "tests/commands/execution/test_run.py::test_run[adhoc-True-False-False]", "tests/commands/execution/test_run.py::test_run[adhoc-True-False-True]", "tests/commands/execution/test_run.py::test_run[adhoc-True-True-False]", "tests/commands/execution/test_run.py::test_run[adhoc-True-True-True]" ]
[]
MIT License
2,762
[ "valohai_cli/utils/levenshtein.py", "valohai_cli/commands/execution/run.py", "valohai_cli/cli_utils.py", "valohai_cli/yaml_wizard.py", "valohai_cli/commands/project/link.py", "valohai_cli/utils/friendly_option_parser.py", "valohai_cli/git.py", "valohai_cli/utils.py" ]
[ "valohai_cli/utils/levenshtein.py", "valohai_cli/utils/cli_utils.py", "valohai_cli/utils/__init__.py", "valohai_cli/commands/execution/run.py", "valohai_cli/yaml_wizard.py", "valohai_cli/commands/project/link.py", "valohai_cli/utils/friendly_option_parser.py", "valohai_cli/git.py" ]
conan-io__conan-3197
7ce59398132725f3da4e548baab7e2fa3f28f5e1
2018-07-11 15:41:15
f59b0d5773ca17e222236b1b6b55785f03539216
memsharded: Weird failure @lasote, it seems Python3.6 does not support lzma on OSX? https://conan-ci.jfrog.info/blue/organizations/jenkins/ConanTestSuite/detail/PR-3197/2/pipeline/13 lasote: In the Mac slave, python is built from sources using pyenv. Apparently, when built from sources, you need to have the lzma library installed or python won't enable the module. I'll try to rebuild it. The distributed binary packages in home brew (which I have locally) have the lzma enabled.
diff --git a/.ci/jenkins/conf.py b/.ci/jenkins/conf.py index f59d4441e..02c9bc118 100644 --- a/.ci/jenkins/conf.py +++ b/.ci/jenkins/conf.py @@ -9,7 +9,7 @@ winpylocation = {"py27": "C:\\Python27\\python.exe", macpylocation = {"py27": "/usr/bin/python", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python", "py34": "/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python", - "py36": "/Users/jenkins_ci/.pyenv/versions/3.6.3/bin/python"} + "py36": "/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python"} linuxpylocation = {"py27": "/usr/bin/python2.7", "py34": "/usr/bin/python3.4", diff --git a/conans/client/remote_manager.py b/conans/client/remote_manager.py index dcecb3190..929174655 100644 --- a/conans/client/remote_manager.py +++ b/conans/client/remote_manager.py @@ -202,7 +202,8 @@ class RemoteManager(object): t1 = time.time() def filter_function(urls): - file_url = urls.get(EXPORT_SOURCES_TGZ_NAME) + file_url = urls.pop(EXPORT_SOURCES_TGZ_NAME, None) + check_compressed_files(EXPORT_SOURCES_TGZ_NAME, urls) if file_url: urls = {EXPORT_SOURCES_TGZ_NAME: file_url} else: @@ -340,8 +341,6 @@ def compress_package_files(files, symlinks, dest_folder, output): def compress_files(files, symlinks, name, dest_dir): - """Compress the package and returns the new dict (name => content) of files, - only with the conanXX files and the compressed file""" t1 = time.time() # FIXME, better write to disk sequentially and not keep tgz contents in memory tgz_path = os.path.join(dest_dir, name) @@ -378,11 +377,20 @@ def compress_files(files, symlinks, name, dest_dir): return tgz_path +def check_compressed_files(tgz_name, files): + bare_name = os.path.splitext(tgz_name)[0] + for f in files: + if bare_name == os.path.splitext(f)[0]: + raise ConanException("This Conan version is not prepared to handle '%s' file format. " + "Please upgrade conan client." 
% f) + + def unzip_and_get_files(files, destination_dir, tgz_name): """Moves all files from package_files, {relative_name: tmp_abs_path} to destination_dir, unzipping the "tgz_name" if found""" tgz_file = files.pop(tgz_name, None) + check_compressed_files(tgz_name, files) if tgz_file: uncompress_file(tgz_file, destination_dir) os.remove(tgz_file) diff --git a/conans/client/tools/files.py b/conans/client/tools/files.py index 5d6ddef97..0d42fe368 100644 --- a/conans/client/tools/files.py +++ b/conans/client/tools/files.py @@ -11,6 +11,7 @@ from conans.client.output import ConanOutput from conans.errors import ConanException from conans.util.files import (load, save, _generic_algorithm_sum) from conans.unicode import get_cwd +import six _global_output = None @@ -68,6 +69,11 @@ def unzip(filename, destination=".", keep_permissions=False, pattern=None): filename.endswith(".tbz2") or filename.endswith(".tar.bz2") or filename.endswith(".tar")): return untargz(filename, destination, pattern) + if filename.endswith(".tar.xz") or filename.endswith(".txz"): + if six.PY2: + raise ConanException("XZ format not supported in Python 2. 
Use Python 3 instead") + return untargz(filename, destination, pattern) + import zipfile full_path = os.path.normpath(os.path.join(get_cwd(), destination)) diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py index 455397546..17557672b 100644 --- a/conans/model/conan_file.py +++ b/conans/model/conan_file.py @@ -12,6 +12,8 @@ from conans.model.user_info import DepsUserInfo from conans.paths import RUN_LOG_NAME from conans.tools import environment_append, no_op from conans.client.output import Color +from conans.client.run_environment import RunEnvironment +from conans.client.tools.oss import os_info def create_options(conanfile): @@ -255,15 +257,23 @@ class ConanFile(object): """ define cpp_build_info, flags, etc """ - def run(self, command, output=True, cwd=None, win_bash=False, subsystem=None, msys_mingw=True): - if not win_bash: - retcode = self._runner(command, output, os.path.abspath(RUN_LOG_NAME), cwd) - else: + def run(self, command, output=True, cwd=None, win_bash=False, subsystem=None, msys_mingw=True, + ignore_errors=False, run_environment=False): + def _run(): + if not win_bash: + return self._runner(command, output, os.path.abspath(RUN_LOG_NAME), cwd) # FIXME: run in windows bash is not using output - retcode = tools.run_in_windows_bash(self, bashcmd=command, cwd=cwd, subsystem=subsystem, - msys_mingw=msys_mingw) + return tools.run_in_windows_bash(self, bashcmd=command, cwd=cwd, subsystem=subsystem, + msys_mingw=msys_mingw) + if run_environment: + with tools.environment_append(RunEnvironment(self).vars): + if os_info.is_macos: + command = 'DYLD_LIBRARY_PATH="%s" %s' % (os.environ.get('DYLD_LIBRARY_PATH', ''), command) + retcode = _run() + else: + retcode = _run() - if retcode != 0: + if not ignore_errors and retcode != 0: raise ConanException("Error %d while executing %s" % (retcode, command)) return retcode diff --git a/conans/server/rest/controllers/file_upload_download_controller.py 
b/conans/server/rest/controllers/file_upload_download_controller.py index b49091a07..fe8ec36ab 100644 --- a/conans/server/rest/controllers/file_upload_download_controller.py +++ b/conans/server/rest/controllers/file_upload_download_controller.py @@ -20,7 +20,12 @@ class FileUploadDownloadController(Controller): token = request.query.get("signature", None) file_path = service.get_file_path(filepath, token) # https://github.com/kennethreitz/requests/issues/1586 - mimetype = "x-gzip" if filepath.endswith(".tgz") else "auto" + if filepath.endswith(".tgz"): + mimetype = "x-gzip" + elif filepath.endswith(".txz"): + mimetype = "x-xz" + else: + mimetype = "auto" return static_file(os.path.basename(file_path), root=os.path.dirname(file_path), mimetype=mimetype)
Prepare 1.6 to fail gently with "conan_package" different compression extensions. From https://github.com/conan-io/conan/issues/648 To be ready for the future, versions from 1.6 could fail gently if a conan_package with a different extension (not tgz) is found in a remote package. Maybe saying: Your current conan version does not allow the `xxx` extension for conan package. Update to the latest Conan version to get support.
conan-io/conan
diff --git a/conans/test/functional/runner_test.py b/conans/test/functional/runner_test.py index f0dc4fda8..d00f9eb71 100644 --- a/conans/test/functional/runner_test.py +++ b/conans/test/functional/runner_test.py @@ -2,7 +2,6 @@ import os import six import unittest -from io import StringIO from conans.client.runner import ConanRunner from conans.test.utils.tools import TestClient @@ -19,6 +18,18 @@ class RunnerTest(unittest.TestCase): client.run("build .") return client + def ignore_error_test(self): + conanfile = """from conans import ConanFile +class Pkg(ConanFile): + def source(self): + ret = self.run("not_a_command", ignore_errors=True) + self.output.info("RETCODE %s" % (ret!=0)) +""" + client = TestClient() + client.save({"conanfile.py": conanfile}) + client.run("source .") + self.assertIn("RETCODE True", client.out) + def basic_test(self): conanfile = ''' from conans import ConanFile diff --git a/conans/test/integration/run_envronment_test.py b/conans/test/integration/run_envronment_test.py index 48a3638b9..efd78257b 100644 --- a/conans/test/integration/run_envronment_test.py +++ b/conans/test/integration/run_envronment_test.py @@ -34,3 +34,68 @@ class HelloConan(ConanFile): client.save({"conanfile.py": reuse}, clean_first=True) client.run("install . 
--build missing") client.run("build .") + self.assertIn("Hello Hello0", client.out) + + def test_shared_run_environment(self): + client = TestClient() + cmake = """set(CMAKE_CXX_COMPILER_WORKS 1) +set(CMAKE_CXX_ABI_COMPILED 1) +project(MyHello CXX) +cmake_minimum_required(VERSION 2.8.12) + +add_library(hello SHARED hello.cpp) +add_executable(say_hello main.cpp) +target_link_libraries(say_hello hello)""" + hello_h = """#ifdef WIN32 + #define HELLO_EXPORT __declspec(dllexport) +#else + #define HELLO_EXPORT +#endif + +HELLO_EXPORT void hello(); +""" + hello_cpp = r"""#include "hello.h" +#include <iostream> +void hello(){ + std::cout<<"Hello Tool!\n"; +} +""" + main = """#include "hello.h" + int main(){ + hello(); + } + """ + conanfile = """from conans import ConanFile, CMake +class Pkg(ConanFile): + exports_sources = "*" + def build(self): + cmake = CMake(self) + cmake.configure() + cmake.build() + + def package(self): + self.copy("*say_hello.exe", dst="bin", keep_path=False) + self.copy("*say_hello", dst="bin", keep_path=False) + self.copy(pattern="*.dll", dst="bin", keep_path=False) + self.copy(pattern="*.dylib", dst="lib", keep_path=False) + self.copy(pattern="*.so", dst="lib", keep_path=False) +""" + client.save({"conanfile.py": conanfile, + "CMakeLists.txt": cmake, + "main.cpp": main, + "hello.cpp": hello_cpp, + "hello.h": hello_h}) + client.run("create . 
Pkg/0.1@user/testing") + + reuse = '''from conans import ConanFile +class HelloConan(ConanFile): + requires = "Pkg/0.1@user/testing" + + def build(self): + self.run("say_hello", run_environment=True) +''' + + client.save({"conanfile.py": reuse}, clean_first=True) + client.run("install .") + client.run("build .") + self.assertIn("Hello Tool!", client.out) diff --git a/conans/test/util/xz_test.py b/conans/test/util/xz_test.py new file mode 100644 index 000000000..262549736 --- /dev/null +++ b/conans/test/util/xz_test.py @@ -0,0 +1,88 @@ +import os +from unittest import TestCase +import six +import unittest +import tarfile + +from conans.test.utils.test_files import temp_folder +from conans.tools import unzip, save +from conans.util.files import load, save_files +from conans.errors import ConanException +from conans.test.utils.tools import TestClient, TestServer +from conans.model.ref import ConanFileReference, PackageReference + + +class XZTest(TestCase): + def test_error_xz(self): + server = TestServer() + ref = ConanFileReference.loads("Pkg/0.1@user/channel") + export = server.paths.export(ref) + save_files(export, {"conanfile.py": "#", + "conanmanifest.txt": "#", + "conan_export.txz": "#"}) + client = TestClient(servers={"default": server}, + users={"default": [("lasote", "mypass")]}) + error = client.run("install Pkg/0.1@user/channel", ignore_error=True) + self.assertTrue(error) + self.assertIn("ERROR: This Conan version is not prepared to handle " + "'conan_export.txz' file format", client.out) + + def test_error_sources_xz(self): + server = TestServer() + ref = ConanFileReference.loads("Pkg/0.1@user/channel") + client = TestClient(servers={"default": server}, + users={"default": [("lasote", "mypass")]}) + export = server.paths.export(ref) + conanfile = """from conans import ConanFile +class Pkg(ConanFile): + exports_sources = "*" +""" + save_files(export, {"conanfile.py": conanfile, + "conanmanifest.txt": "1", + "conan_sources.txz": "#"}) + error = 
client.run("install Pkg/0.1@user/channel --build", ignore_error=True) + self.assertTrue(error) + self.assertIn("ERROR: This Conan version is not prepared to handle " + "'conan_sources.txz' file format", client.out) + + def test_error_package_xz(self): + server = TestServer() + ref = ConanFileReference.loads("Pkg/0.1@user/channel") + client = TestClient(servers={"default": server}, + users={"default": [("lasote", "mypass")]}) + export = server.paths.export(ref) + conanfile = """from conans import ConanFile +class Pkg(ConanFile): + exports_sources = "*" +""" + save_files(export, {"conanfile.py": conanfile, + "conanmanifest.txt": "1"}) + pkg_ref = PackageReference(ref, "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9") + package = server.paths.package(pkg_ref) + save_files(package, {"conaninfo.txt": "#", + "conanmanifest.txt": "1", + "conan_package.txz": "#"}) + error = client.run("install Pkg/0.1@user/channel", ignore_error=True) + self.assertTrue(error) + self.assertIn("ERROR: This Conan version is not prepared to handle " + "'conan_package.txz' file format", client.out) + + @unittest.skipUnless(six.PY3, "only Py3") + def test(self): + tmp_dir = temp_folder() + file_path = os.path.join(tmp_dir, "a_file.txt") + save(file_path, "my content!") + txz = os.path.join(tmp_dir, "sample.tar.xz") + with tarfile.open(txz, "w:xz") as tar: + tar.add(file_path, "a_file.txt") + + dest_folder = temp_folder() + unzip(txz, dest_folder) + content = load(os.path.join(dest_folder, "a_file.txt")) + self.assertEqual(content, "my content!") + + @unittest.skipUnless(six.PY2, "only Py2") + def test_error_python2(self): + with self.assertRaisesRegexp(ConanException, "XZ format not supported in Python 2"): + dest_folder = temp_folder() + unzip("somefile.tar.xz", dest_folder)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 5 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y cmake" ], "python": "3.6", "reqs_path": [ "conans/requirements.txt", "conans/requirements_server.txt", "conans/requirements_dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==1.6.6 attrs==22.2.0 beautifulsoup4==4.12.3 bottle==0.12.25 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 colorama==0.3.9 -e git+https://github.com/conan-io/conan.git@7ce59398132725f3da4e548baab7e2fa3f28f5e1#egg=conan coverage==4.2 deprecation==2.0.7 distro==1.1.0 fasteners==0.19 future==0.16.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 mock==1.3.0 node-semver==0.2.0 nose==1.3.7 packaging==21.3 parameterized==0.8.1 patch==1.16 pbr==6.1.1 pluggy==1.0.0 pluginbase==0.7 py==1.11.0 Pygments==2.14.0 PyJWT==1.7.1 pylint==1.8.4 pyparsing==3.1.4 pytest==7.0.1 PyYAML==3.12 requests==2.27.1 six==1.17.0 soupsieve==2.3.2.post1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 waitress==2.0.0 WebOb==1.8.9 WebTest==2.0.35 wrapt==1.16.0 zipp==3.6.0
name: conan channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==1.6.6 - attrs==22.2.0 - beautifulsoup4==4.12.3 - bottle==0.12.25 - charset-normalizer==2.0.12 - codecov==2.1.13 - colorama==0.3.9 - coverage==4.2 - deprecation==2.0.7 - distro==1.1.0 - fasteners==0.19 - future==0.16.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isort==5.10.1 - lazy-object-proxy==1.7.1 - mccabe==0.7.0 - mock==1.3.0 - node-semver==0.2.0 - nose==1.3.7 - packaging==21.3 - parameterized==0.8.1 - patch==1.16 - pbr==6.1.1 - pluggy==1.0.0 - pluginbase==0.7 - py==1.11.0 - pygments==2.14.0 - pyjwt==1.7.1 - pylint==1.8.4 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==3.12 - requests==2.27.1 - six==1.17.0 - soupsieve==2.3.2.post1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - waitress==2.0.0 - webob==1.8.9 - webtest==2.0.35 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/conan
[ "conans/test/util/xz_test.py::XZTest::test" ]
[ "conans/test/integration/run_envronment_test.py::RunEnvironmentTest::test_run_environment", "conans/test/integration/run_envronment_test.py::RunEnvironmentTest::test_shared_run_environment", "conans/test/util/xz_test.py::XZTest::test_error_package_xz", "conans/test/util/xz_test.py::XZTest::test_error_sources_xz", "conans/test/util/xz_test.py::XZTest::test_error_xz" ]
[ "conans/test/functional/runner_test.py::RunnerTest::test_write_to_stringio" ]
[]
MIT License
2,763
[ "conans/client/remote_manager.py", "conans/client/tools/files.py", "conans/server/rest/controllers/file_upload_download_controller.py", ".ci/jenkins/conf.py", "conans/model/conan_file.py" ]
[ "conans/client/remote_manager.py", "conans/client/tools/files.py", "conans/server/rest/controllers/file_upload_download_controller.py", ".ci/jenkins/conf.py", "conans/model/conan_file.py" ]
dwavesystems__dimod-219
54303f184427d1b3a2741cfb7ea1b1263f68a361
2018-07-11 18:33:24
8ebfffa42319aa4850cfc5a1c99a8711eac44722
diff --git a/dimod/response.py b/dimod/response.py index 1378ed70..e2b30309 100644 --- a/dimod/response.py +++ b/dimod/response.py @@ -230,7 +230,7 @@ class Response(Iterable, Sized): True """ - return all(future.done() for future in self._futures) + return all(future.done() for future in self._futures.get('futures', tuple())) ############################################################################################## # Construction and updates @@ -559,7 +559,7 @@ class Response(Iterable, Sized): response = cls.empty(vartype) # now dump all of the remaining information into the _futures - response._futures = {'futures': futures, + response._futures = {'futures': list(futures), 'samples_key': samples_key, 'data_vector_keys': data_vector_keys, 'info_keys': info_keys,
Response futures appear to be strings? ``` from dwave.system.samplers import DWaveSampler sampler = DWaveSampler(profile='BAY4') h = {} J = {(0, 4): -1} response = sampler.sample_ising(h, J, num_reads=10) print response.done() ``` yields ``` AttributeError: 'str' object has no attribute 'done' ``` If I print `response._futures` right after creating the response, I get ``` {'ignore_extra_keys': True, 'samples_key': 'samples', 'futures': (<dwave.cloud.computation.Future object at 0x7f31793d8110>,), 'data_vector_keys': {'energies': 'energy', 'num_occurrences': 'num_occurrences'}, 'active_variables': [0, 4], 'variable_labels': [0, 4], 'info_keys': {'timing': 'timing'}} ``` Is it possible `response._futures` is temporarily overwritten somewhere? Apparently yes, `response._futures` can hold something else than futures: at https://github.com/dwavesystems/dimod/blob/master/dimod/response.py#L561, it gets populated with a bunch of stuff, including non-futures. So `response._futures` can be different things at different stages of the response ... :-1:
dwavesystems/dimod
diff --git a/tests/test_response.py b/tests/test_response.py index bd18948e..c3c0ddf3 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -301,6 +301,8 @@ class TestResponse(unittest.TestCase): response = dimod.Response.from_futures(_futures(), vartype=dimod.SPIN, num_variables=3) + self.assertTrue(response.done()) + matrix = response.samples_matrix npt.assert_equal(matrix, np.matrix([[-1, -1, 1], [-1, -1, 1]]))
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 decorator==5.1.1 -e git+https://github.com/dwavesystems/dimod.git@54303f184427d1b3a2741cfb7ea1b1263f68a361#egg=dimod docutils==0.18.1 enum34==1.1.6 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 jsonschema==2.6.0 MarkupSafe==2.0.1 mock==2.0.0 networkx==2.0 numpy==1.11.3 packaging==21.3 pandas==0.22.0 pbr==6.1.1 pluggy==1.0.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 six==1.11.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: dimod channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - decorator==5.1.1 - docutils==0.18.1 - enum34==1.1.6 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - jsonschema==2.6.0 - markupsafe==2.0.1 - mock==2.0.0 - networkx==2.0 - numpy==1.11.3 - packaging==21.3 - pandas==0.22.0 - pbr==6.1.1 - pluggy==1.0.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - six==1.11.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/dimod
[ "tests/test_response.py::TestResponse::test_from_futures" ]
[]
[ "tests/test_response.py::TestResponse::test__iter__", "tests/test_response.py::TestResponse::test_change_vartype_copy", "tests/test_response.py::TestResponse::test_change_vartype_inplace", "tests/test_response.py::TestResponse::test_data_docstrings", "tests/test_response.py::TestResponse::test_data_vectors_are_arrays", "tests/test_response.py::TestResponse::test_data_vectors_copy", "tests/test_response.py::TestResponse::test_data_vectors_not_array_like", "tests/test_response.py::TestResponse::test_data_vectors_wrong_length", "tests/test_response.py::TestResponse::test_empty", "tests/test_response.py::TestResponse::test_from_dicts", "tests/test_response.py::TestResponse::test_from_dicts_unlike_labels", "tests/test_response.py::TestResponse::test_from_dicts_unsortable_labels", "tests/test_response.py::TestResponse::test_from_futures_column_subset", "tests/test_response.py::TestResponse::test_from_futures_extra_keys", "tests/test_response.py::TestResponse::test_from_futures_typical", "tests/test_response.py::TestResponse::test_from_matrix", "tests/test_response.py::TestResponse::test_from_pandas", "tests/test_response.py::TestResponse::test_infer_vartype", "tests/test_response.py::TestResponse::test_instantiation", "tests/test_response.py::TestResponse::test_instantiation_without_energy", "tests/test_response.py::TestResponse::test_partial_relabel", "tests/test_response.py::TestResponse::test_partial_relabel_inplace", "tests/test_response.py::TestResponse::test_relabel_copy", "tests/test_response.py::TestResponse::test_relabel_docstring", "tests/test_response.py::TestResponse::test_samples_num_limited", "tests/test_response.py::TestResponse::test_update", "tests/test_response.py::TestResponse::test_update_energy" ]
[]
Apache License 2.0
2,764
[ "dimod/response.py" ]
[ "dimod/response.py" ]
dwavesystems__dimod-220
54303f184427d1b3a2741cfb7ea1b1263f68a361
2018-07-11 18:39:01
8ebfffa42319aa4850cfc5a1c99a8711eac44722
diff --git a/dimod/response.py b/dimod/response.py index 1378ed70..89891418 100644 --- a/dimod/response.py +++ b/dimod/response.py @@ -230,7 +230,7 @@ class Response(Iterable, Sized): True """ - return all(future.done() for future in self._futures) + return all(future.done() for future in self._futures.get('futures', tuple())) ############################################################################################## # Construction and updates @@ -421,7 +421,7 @@ class Response(Iterable, Sized): import pandas as pd variable_labels = list(samples_df.columns) - samples_matrix = samples_df.as_matrix(columns=variable_labels) + samples_matrix = np.matrix(samples_df.values) if isinstance(data_vectors, pd.DataFrame): raise NotImplementedError("support for DataFrame data_vectors is forthcoming") @@ -559,7 +559,7 @@ class Response(Iterable, Sized): response = cls.empty(vartype) # now dump all of the remaining information into the _futures - response._futures = {'futures': futures, + response._futures = {'futures': list(futures), 'samples_key': samples_key, 'data_vector_keys': data_vector_keys, 'info_keys': info_keys,
numpy FutureWarning FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead. samples_matrix = samples_df.as_matrix(columns=variable_labels)
dwavesystems/dimod
diff --git a/tests/test_response.py b/tests/test_response.py index bd18948e..c3c0ddf3 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -301,6 +301,8 @@ class TestResponse(unittest.TestCase): response = dimod.Response.from_futures(_futures(), vartype=dimod.SPIN, num_variables=3) + self.assertTrue(response.done()) + matrix = response.samples_matrix npt.assert_equal(matrix, np.matrix([[-1, -1, 1], [-1, -1, 1]]))
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 decorator==5.1.1 -e git+https://github.com/dwavesystems/dimod.git@54303f184427d1b3a2741cfb7ea1b1263f68a361#egg=dimod docutils==0.18.1 enum34==1.1.6 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 jsonschema==2.6.0 MarkupSafe==2.0.1 mock==2.0.0 networkx==2.0 numpy==1.11.3 packaging==21.3 pandas==0.22.0 pbr==6.1.1 pluggy==1.0.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 six==1.11.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: dimod channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - decorator==5.1.1 - docutils==0.18.1 - enum34==1.1.6 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - jsonschema==2.6.0 - markupsafe==2.0.1 - mock==2.0.0 - networkx==2.0 - numpy==1.11.3 - packaging==21.3 - pandas==0.22.0 - pbr==6.1.1 - pluggy==1.0.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - six==1.11.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/dimod
[ "tests/test_response.py::TestResponse::test_from_futures" ]
[]
[ "tests/test_response.py::TestResponse::test__iter__", "tests/test_response.py::TestResponse::test_change_vartype_copy", "tests/test_response.py::TestResponse::test_change_vartype_inplace", "tests/test_response.py::TestResponse::test_data_docstrings", "tests/test_response.py::TestResponse::test_data_vectors_are_arrays", "tests/test_response.py::TestResponse::test_data_vectors_copy", "tests/test_response.py::TestResponse::test_data_vectors_not_array_like", "tests/test_response.py::TestResponse::test_data_vectors_wrong_length", "tests/test_response.py::TestResponse::test_empty", "tests/test_response.py::TestResponse::test_from_dicts", "tests/test_response.py::TestResponse::test_from_dicts_unlike_labels", "tests/test_response.py::TestResponse::test_from_dicts_unsortable_labels", "tests/test_response.py::TestResponse::test_from_futures_column_subset", "tests/test_response.py::TestResponse::test_from_futures_extra_keys", "tests/test_response.py::TestResponse::test_from_futures_typical", "tests/test_response.py::TestResponse::test_from_matrix", "tests/test_response.py::TestResponse::test_from_pandas", "tests/test_response.py::TestResponse::test_infer_vartype", "tests/test_response.py::TestResponse::test_instantiation", "tests/test_response.py::TestResponse::test_instantiation_without_energy", "tests/test_response.py::TestResponse::test_partial_relabel", "tests/test_response.py::TestResponse::test_partial_relabel_inplace", "tests/test_response.py::TestResponse::test_relabel_copy", "tests/test_response.py::TestResponse::test_relabel_docstring", "tests/test_response.py::TestResponse::test_samples_num_limited", "tests/test_response.py::TestResponse::test_update", "tests/test_response.py::TestResponse::test_update_energy" ]
[]
Apache License 2.0
2,765
[ "dimod/response.py" ]
[ "dimod/response.py" ]
jamescooke__flake8-aaa-34
6c0ce3154fd266e222afffffbc6386b2d9d55adb
2018-07-11 20:12:30
6c0ce3154fd266e222afffffbc6386b2d9d55adb
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e45b27d..78198a4 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -14,6 +14,12 @@ Unreleased_ See also `latest documentation <https://flake8-aaa.readthedocs.io/en/latest/>`_. +Changed +------- + +* Improved loading of Act blocks so that they can be found within context + managers. + 0.3.0_ - 2018/06/28 ------------------- diff --git a/flake8_aaa/act_block.py b/flake8_aaa/act_block.py index 07f1290..04558ed 100644 --- a/flake8_aaa/act_block.py +++ b/flake8_aaa/act_block.py @@ -1,4 +1,5 @@ -from .exceptions import NotActionBlock +import ast + from .helpers import node_is_pytest_raises, node_is_result_assignment from .types import ActBlockType @@ -19,6 +20,20 @@ class ActBlock(object): self.node = node self.block_type = block_type + @classmethod + def build_body(cls, body): + """ + Args: + body (list (ast.node)): List of nodes from a block. + + Returns: + list (ActBlock) + """ + act_blocks = [] + for child_node in body: + act_blocks += ActBlock.build(child_node) + return act_blocks + @classmethod def build(cls, node): """ @@ -26,18 +41,19 @@ class ActBlock(object): node (ast.node): A node, decorated with ``ASTTokens``. Returns: - ActBlock - - Raises: - NotActionBlock: When ``node`` does not look like an Act block. 
+ list(ActBlock) """ if node_is_result_assignment(node): - return cls(node, ActBlockType.result_assignment) + return [cls(node, ActBlockType.result_assignment)] elif node_is_pytest_raises(node): - return cls(node, ActBlockType.pytest_raises) + return [cls(node, ActBlockType.pytest_raises)] # Check if line marked with '# act' if node.first_token.line.strip().endswith('# act'): - return cls(node, ActBlockType.marked_act) + return [cls(node, ActBlockType.marked_act)] + + # Recurse if it's a context manager + if isinstance(node, ast.With): + return cls.build_body(node.body) - raise NotActionBlock() + return [] diff --git a/flake8_aaa/exceptions.py b/flake8_aaa/exceptions.py index 0484c00..b0fcc95 100644 --- a/flake8_aaa/exceptions.py +++ b/flake8_aaa/exceptions.py @@ -2,13 +2,6 @@ class Flake8AAAException(Exception): pass -class NotActionBlock(Flake8AAAException): - """ - Used when parsing if lines of a function should be considered Action - blocks. - """ - - class ValidationError(Flake8AAAException): """ Attributes: diff --git a/flake8_aaa/function.py b/flake8_aaa/function.py index 6f9d757..184b5f0 100644 --- a/flake8_aaa/function.py +++ b/flake8_aaa/function.py @@ -1,7 +1,7 @@ from .act_block import ActBlock from .arrange_block import ArrangeBlock from .assert_block import AssertBlock -from .exceptions import NotActionBlock, ValidationError +from .exceptions import ValidationError from .helpers import function_is_noop from .types import ActBlockType @@ -50,12 +50,7 @@ class Function(object): Raises: ValidationError """ - act_blocks = [] - for child_node in self.node.body: - try: - act_blocks.append(ActBlock.build(child_node)) - except NotActionBlock: - continue + act_blocks = ActBlock.build_body(self.node.body) if len(act_blocks) < 1: raise ValidationError(self.node.lineno, self.node.col_offset, 'AAA01 no Act block found in test')
Act blocks within context managers are not found, even when marked with #act ``` with mock.patch.object(sync_metadata, 'delay') as m_delay: result = url(project_biz) # act ```
jamescooke/flake8-aaa
diff --git a/tests/act_block/test_build.py b/tests/act_block/test_build.py index 500980e..a704be6 100644 --- a/tests/act_block/test_build.py +++ b/tests/act_block/test_build.py @@ -1,7 +1,6 @@ import pytest from flake8_aaa.act_block import ActBlock -from flake8_aaa.exceptions import NotActionBlock from flake8_aaa.types import ActBlockType # TODO act blocks need testing with 'result =' indented @@ -20,9 +19,11 @@ def test_not_actions(first_node_with_tokens): def test_raises_block(first_node_with_tokens): result = ActBlock.build(first_node_with_tokens.body[0]) - assert isinstance(result, ActBlock) - assert result.node == first_node_with_tokens.body[0] - assert result.block_type == ActBlockType.pytest_raises + assert isinstance(result, list) + assert len(result) == 1 + assert isinstance(result[0], ActBlock) + assert result[0].node == first_node_with_tokens.body[0] + assert result[0].block_type == ActBlockType.pytest_raises @pytest.mark.parametrize( @@ -35,9 +36,26 @@ def test_raises_block(first_node_with_tokens): def test(expected_type, first_node_with_tokens): result = ActBlock.build(first_node_with_tokens) - assert isinstance(result, ActBlock) - assert result.node == first_node_with_tokens - assert result.block_type == expected_type + assert isinstance(result, list) + assert len(result) == 1 + assert isinstance(result[0], ActBlock) + assert result[0].node == first_node_with_tokens + assert result[0].block_type == expected_type + + [email protected]( + 'code_str', [ + "with mock.patch('stats.deletion_manager.deleted'):\n result = existing_user.delete()", + ] +) +def test_nested(first_node_with_tokens): + result = ActBlock.build(first_node_with_tokens) + + assert isinstance(result, list) + assert len(result) == 1 + assert isinstance(result[0], ActBlock) + assert result[0].block_type == ActBlockType.result_assignment + assert result[0].node == first_node_with_tokens.body[0] @pytest.mark.parametrize( @@ -51,5 +69,6 @@ def test(expected_type, first_node_with_tokens): ] ) 
def test_not_actions(first_node_with_tokens): - with pytest.raises(NotActionBlock): - ActBlock.build(first_node_with_tokens) + result = ActBlock.build(first_node_with_tokens) + + assert result == [] diff --git a/tests/function/test_load_act_block.py b/tests/function/test_load_act_block.py index 38c2453..9e0d91f 100644 --- a/tests/function/test_load_act_block.py +++ b/tests/function/test_load_act_block.py @@ -54,6 +54,69 @@ def test_raises_in_assert(function): assert result.node.first_token.line == ' result = existing_user.delete()\n' [email protected]( + 'code_str', + [ + ''' +def test(existing_user): + with mock.patch('stats.deletion_manager.deleted'): + result = existing_user.delete() + + assert result is True + assert result.retrieved is False +''' + ], + ids=['act in context manager'], +) +def test_in_cm(function): + result = function.load_act_block() + + assert isinstance(result, ActBlock) + assert result.block_type == ActBlockType.result_assignment + assert result.node.first_token.line == ' result = existing_user.delete()\n' + + [email protected]( + 'code_str', + [ + ''' +def test_no_recreate(existing_user): + with mock.patch('stats.creation_manager.created'): + with pytest.raises(ValidationError): + existing_user.create() +''' + ], + ids=['pytest raises in context manager'], +) +def test_raises_in_cm(function): + result = function.load_act_block() + + assert isinstance(result, ActBlock) + assert result.block_type == ActBlockType.pytest_raises + assert result.node.first_token.line == ' with pytest.raises(ValidationError):\n' + + [email protected]( + 'code_str', + [ + ''' +def test_creation(stub_user): + with mock.patch('stats.creation_manager.created'): + stub_user.create() # act + + assert stub_user.exists() +''' + ], + ids=['marked act block in context manager'], +) +def test_marked_in_cm(function): + result = function.load_act_block() + + assert isinstance(result, ActBlock) + assert result.block_type == ActBlockType.marked_act + assert 
result.node.first_token.line == ' stub_user.create() # act\n' + + # --- FAILURES --- @@ -86,6 +149,17 @@ def test(): eggs = 1 # act assert chickens + eggs == 2 + ''', + ''' +def test_read(self): + with open('data') as data_file: + result = data_file.read() + + assert result == '' + + result = data_file.read() + + assert result == '' ''', ] )
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 4 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asttokens==1.1.10 attrs==22.2.0 certifi==2021.5.30 flake8==3.5.0 -e git+https://github.com/jamescooke/flake8-aaa.git@6c0ce3154fd266e222afffffbc6386b2d9d55adb#egg=flake8_aaa importlib-metadata==4.8.3 iniconfig==1.1.1 mccabe==0.6.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycodestyle==2.3.1 pyflakes==1.6.0 pyparsing==3.1.4 pytest==7.0.1 six==1.11.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: flake8-aaa channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asttokens==1.1.10 - attrs==22.2.0 - flake8==3.5.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mccabe==0.6.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.3.1 - pyflakes==1.6.0 - pyparsing==3.1.4 - pytest==7.0.1 - six==1.11.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/flake8-aaa
[ "tests/act_block/test_build.py::test_raises_block[\\ndef", "tests/act_block/test_build.py::test[result", "tests/act_block/test_build.py::test[with", "tests/act_block/test_build.py::test[data[new_key]", "tests/act_block/test_build.py::test_nested[with", "tests/act_block/test_build.py::test_not_actions[act", "tests/act_block/test_build.py::test_not_actions[actions", "tests/act_block/test_build.py::test_not_actions[person", "tests/act_block/test_build.py::test_not_actions[result", "tests/act_block/test_build.py::test_not_actions[results", "tests/act_block/test_build.py::test_not_actions[with", "tests/function/test_load_act_block.py::test_in_cm[act", "tests/function/test_load_act_block.py::test_raises_in_cm[pytest", "tests/function/test_load_act_block.py::test_marked_in_cm[marked", "tests/function/test_load_act_block.py::test_multiple_acts[\\ndef" ]
[]
[ "tests/function/test_load_act_block.py::test_assignment[\\ndef", "tests/function/test_load_act_block.py::test_act_marker[\\ndef", "tests/function/test_load_act_block.py::test_raises_in_assert[\\ndef", "tests/function/test_load_act_block.py::test_no_block[\\ndef" ]
[]
MIT License
2,766
[ "CHANGELOG.rst", "flake8_aaa/exceptions.py", "flake8_aaa/function.py", "flake8_aaa/act_block.py" ]
[ "CHANGELOG.rst", "flake8_aaa/exceptions.py", "flake8_aaa/function.py", "flake8_aaa/act_block.py" ]
danqing__dqpy-13
e610cf42ae9685469d9417d475170722da133440
2018-07-12 10:28:03
e610cf42ae9685469d9417d475170722da133440
coveralls: ## Pull Request Test Coverage Report for [Build 95](https://coveralls.io/builds/18419129) * **3** of **27** **(11.11%)** changed or added relevant lines in **2** files are covered. * No unchanged relevant lines lost coverage. * Overall coverage decreased (**-5.2%**) to **94.771%** --- | Changes Missing Coverage | Covered Lines | Changed/Added Lines | % | | :-----|--------------|--------|---: | | [dq/cache.py](https://coveralls.io/builds/18419129/source?filename=dq%2Fcache.py#L1) | 0 | 24 | 0.0% <!-- | **Total:** | **3** | **27** | **11.11%** | --> | Totals | [![Coverage Status](https://coveralls.io/builds/18419129/badge)](https://coveralls.io/builds/18419129) | | :-- | --: | | Change from base [Build 93](https://coveralls.io/builds/18419110): | -5.2% | | Covered Lines: | 435 | | Relevant Lines: | 459 | --- ##### 💛 - [Coveralls](https://coveralls.io)
diff --git a/config/local.toml b/config/local.toml index 0f30dcf..608b2d8 100644 --- a/config/local.toml +++ b/config/local.toml @@ -17,6 +17,11 @@ host = "localhost" port = 6379 db = 0 +[faulty_redis] +host = "localhost" +port = 6380 +db = 0 + [sql] url = "mysql+pymysql://[email protected]:3306/dqpy?charset=utf8mb4" flavor = "mysql" diff --git a/docs/conf.py b/docs/conf.py index d215b10..e70b55e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,9 +59,9 @@ author = 'Danqing Liu' # built documents. # # The short X.Y version. -version = '1.3' +version = '2.0' # The full version, including alpha/beta/rc tags. -release = '1.3.2' +release = '2.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/index.rst b/docs/index.rst index cbce8ad..af59232 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -12,6 +12,16 @@ Danqing's shared Python library, written with love for `the better Danqing <http :maxdepth: 2 :caption: Table of Contents +Cache +----- + +The cache module provides a simple JSON and Redis based caching decorator. + +.. automodule:: dq.cache + :members: + :undoc-members: + :show-inheritance: + Config ------ diff --git a/dq/cache.py b/dq/cache.py new file mode 100644 index 0000000..9e715ec --- /dev/null +++ b/dq/cache.py @@ -0,0 +1,42 @@ +import json +from functools import wraps + +from dq.redis import init_redis, strval + +_redis = init_redis('cache') + + +def cache(ttl=600, key_func=None): + """Cache decorator. + + This can be applied to any function that returns a raw or JSON-serializable + response. To allow caching, the ``cache`` key must be set in the config, + namely the redis connection for cache. + + If the function has a keyword argument named ``fresh``, then the decorator + gets a fresh copy when it's set to a truthy value. 
+ + If the function has a keyword argument named ``raw``, then the decorator + returns the raw (bytes) Redis response as-is, without JSON-deserializing. + + :param number ttl: The TTL in second. Default is 10 minutes. + :param func key_func: The key function. This function should take the same + arguments as the wrapped function, and return the corresponding cache + key as a string. + """ + def memoize(func): + @wraps(func) + def decorated_func(*args, **kwargs): + if not _redis or not key_func: + return func(*args, **kwargs) + key = key_func(*args, **kwargs) + if not kwargs.get('fresh'): + resp = _redis.get(key) + if resp is not None: + return resp if kwargs.get('raw') else json.loads(resp) + resp = func(*args, **kwargs) + _redis.setex(key, ttl, strval(resp)) + return resp + + return decorated_func + return memoize diff --git a/dq/orm.py b/dq/orm.py index ab897b8..588c18e 100644 --- a/dq/orm.py +++ b/dq/orm.py @@ -1,22 +1,13 @@ -import logging import json -import pickle from collections import namedtuple from uuid import uuid4 import arrow -import redis from sqlalchemy import Column, Integer, String from sqlalchemy.ext.declarative import as_declarative from sqlalchemy_utils import ArrowType -from dq.config import Config from dq.database import query_with_limit_offset, Session -from dq.logging import error - -logger = logging.getLogger(__name__) - -SERVICE_NAME = Config.get('meta.name') # An attribute of a model. The order is the following: # - name (string): The name of the attribute (the key) @@ -162,69 +153,6 @@ class TimeMixin(object): deleted_at = Column(ArrowType) -class Cache(object): - """Redis cache engine. This should not be used externally! - - This is a separate Redis connection from the one in the redis package. The - purpose is twofold: - - 1. The cache Redis does not allow response decoding, but the user Redis - requires it. - 2. 
By separating the two, if a project does not have an explicit cache - configuration, then cache can be effectively disabled globally. - """ - - _instance = None - _attempted = False - - @classmethod - def instance(cls): - if cls._instance: - return cls._instance - if cls._attempted: - return None - cachecfg = Config.get('cache') - if not cachecfg: - cls._attempted = True - return None - try: - i = redis.StrictRedis(**cachecfg) - # This will attempt to connect to Redis and throw an error if the - # connection is invalid. - i.info() - cls._instance = i - return i - except Exception: - error(logger, 'Unable to connect to cache Redis', None) - cls._attempted = True - return None - - @classmethod - def get(cls, key): - """Get the value corresponding to the key in the cache. - - :param string key: The cache key. - :returns bytes: The cache value in raw bytes if exists. If the key does - not exist or if cache server cannot be reached, returns ``None``. - """ - i = cls.instance() - try: - return i.get(key) if i else None - except Exception as e: - error(logger, 'Error querying cache', {'key': key, 'error': e}) - return None - - @classmethod - def set(cls, key, value, ttl): - i = cls.instance() - if not i: - return - try: - i.setex(key, ttl, pickle.dumps(value)) - except Exception as e: - error(logger, 'Error setting cache', {'key': key, 'error': e}) - - class QueryMixin(object): """Query helper functions useful to all models. @@ -232,37 +160,9 @@ class QueryMixin(object): class will not need to inherit directly from it. 
""" - @classmethod - def cache_key(cls, column, value, contains_deleted=False, - contains_empty=False): - key = '{}.cache.{}.{}.{}'.format( - SERVICE_NAME, cls.__tablename__, column, value, - ) - if contains_deleted: - key += '.del' - if contains_empty: - key += '.empty' - return key - - @classmethod - def get_cache(cls, column, value, contains_deleted=False, - contains_empty=False): - """Get the object from cache.""" - key = cls.cache_key(column, value, contains_deleted=contains_deleted, - contains_empty=contains_empty) - cached = Cache.get(key) - return pickle.loads(cached) if cached else None - - @classmethod - def save_to_cache(cls, obj, column, value, ttl=3600, - contains_deleted=False, contains_empty=False): - key = cls.cache_key(column, value, contains_deleted=contains_deleted, - contains_empty=contains_empty) - Cache.set(key, obj, ttl) - @classmethod def get_by(cls, column, value, for_update=False, contains_deleted=False, - contains_empty=False, ttl=3600, fresh=False, session=None): + contains_empty=False, session=None): """Get the object satisfying the query condition. :param string column: The name of the column to query by. @@ -273,8 +173,6 @@ class QueryMixin(object): :param boolean contains_empty: Whether to contain empty records. Default is ``False`` and if value is ``None``, ``None`` will be returned. - :param int ttl: The TTL of cache. If < 0, no cache will be used. - :param boolean fresh: Whether to fetch the response fresh from the DB. :param Session session: Optional SQL session. :returns QueryMixin: The matching object. 
This method is designed for unique queries and in case of multiple matches, only the first one @@ -283,14 +181,6 @@ class QueryMixin(object): if not contains_empty and value is None: return None - if ttl > 0 and not fresh: - cached = cls.get_cache( - column, value, contains_deleted=contains_deleted, - contains_empty=contains_empty, - ) - if cached: - return cached - session = session or Session() args = {} args[column] = value @@ -299,13 +189,7 @@ class QueryMixin(object): query = session.query(cls).filter_by(**args) if for_update: query = query.with_for_update() - obj = query.first() - - if obj and ttl > 0: - cls.save_to_cache(obj, column, value, ttl=ttl, - contains_deleted=contains_deleted, - contains_empty=contains_empty) - return obj + return query.first() @classmethod def get_multi(cls, column, value, sort_column='updated_at', desc=True, @@ -378,21 +262,17 @@ class IDBase(QueryMixin, DictMixin, TimeMixin): id = Column(Integer, primary_key=True) @classmethod - def get(cls, id, contains_deleted=False, ttl=3600, fresh=False, - session=None): + def get(cls, id, for_update=False, contains_deleted=False, session=None): """Get an object by its ID. :param int id: The ID of the object. + :param boolean for_update: Whether the query is for updating the row. :param boolean contains_deleted: Whether to contain deleted objects. Default is ``False``. - :param int ttl: Cache TTL in seconds. If <= 0 no cache will be used. - Default is 3600 (1 hour). - :param boolean fresh: Whether to fetch a fresh copy even if cache - exists. Default is ``False``. :param Session session: Optional SQL session to use. 
""" - return cls.get_by('id', id, contains_deleted=contains_deleted, - fresh=fresh, session=session) + return cls.get_by('id', id, for_update=for_update, + contains_deleted=contains_deleted, session=session) @as_declarative() @@ -406,18 +286,14 @@ class UUIDBase(QueryMixin, DictMixin, TimeMixin): uuid = Column(String, primary_key=True, default=uuid4_string) @classmethod - def get(cls, uuid, contains_deleted=False, ttl=3600, fresh=False, - session=None): + def get(cls, uuid, for_update=False, contains_deleted=False, session=None): """Get an object by its UUID. :param string uuid: The UUID of the object. + :param boolean for_update: Whether the query is for updating the row. :param boolean contains_deleted: Whether to contain deleted objects. Default is ``False``. - :param int ttl: Cache TTL in seconds. If <= 0 no cache will be used. - Default is 3600 (1 hour). - :param boolean fresh: Whether to fetch a fresh copy even if cache - exists. Default is ``False``. :param Session session: Optional SQL session to use. """ - return cls.get_by('uuid', uuid, contains_deleted=contains_deleted, - fresh=fresh, session=session) + return cls.get_by('uuid', uuid, for_update=for_update, + contains_deleted=contains_deleted, session=session) diff --git a/dq/redis.py b/dq/redis.py index e881d98..b9ae3bc 100644 --- a/dq/redis.py +++ b/dq/redis.py @@ -1,8 +1,34 @@ import json +import logging import redis from dq.config import Config +from dq.logging import error + +logger = logging.getLogger(__name__) + + +def init_redis(key): + """Initialize a Redis connection. + + :param string key: The config key. The entry should at least contain the + host, port and db number of the instance. + :returns redis: The redis instance if the config exists and is valid, and + None otherwise. + """ + cfg = Config.get(key) + if not cfg: + return None + try: + i = redis.StrictRedis(**cfg) + # This will attempt to connect to Redis and throw an error if the + # connection is invalid. 
+ i.info() + return i + except Exception: + error(logger, 'Unable to connect to Redis', None) + return None def strval(value): @@ -34,7 +60,7 @@ def strvals(*values): class Redis(object): - _instance = redis.StrictRedis(**Config.get('redis')) + _instance = init_redis('redis') @classmethod def exists(cls, key): @@ -55,6 +81,17 @@ class Redis(object): """ return cls._instance.get(key) + @classmethod + def get_json(cls, key): + """Get the value stored at the key as JSON. + + :param string key: The Redis key. + :returns object: The value of the key as an unserialized JSON object. + If the key does not exist, ``None`` will be returned. + """ + resp = cls.get(key) + return json.loads(resp) if resp else None + @classmethod def set(cls, key, value): """Set the key to the specified value. diff --git a/setup.py b/setup.py index cd18a9e..9cd2fe2 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import find_packages, setup -__version__ = '1.3.2' +__version__ = '2.0.0' requires = [ 'arrow==0.12.1',
Model serialization and caching improvements
danqing/dqpy
diff --git a/tests/test_cache.py b/tests/test_cache.py new file mode 100644 index 0000000..0221d26 --- /dev/null +++ b/tests/test_cache.py @@ -0,0 +1,45 @@ +import unittest + +from dq import cache + + +class TestCache(unittest.TestCase): + + def test_cache_none(self): + redis = cache._redis + cache._redis = None + + def key_func(): + return '' + + @cache.cache(key_func=key_func) + def some_func(): + return 'hello' + + assert some_func() == 'hello' + + cache._redis = redis + + @cache.cache() + def some_func_2(): + return 'hello2' + + assert some_func_2() == 'hello2' + + def test_cache_fresh(self): + value = 'hello' + + def key_func(fresh=False, raw=False): + return 'cache-fresh-key' + + @cache.cache(key_func=key_func) + def some_func(fresh=False, raw=False): + nonlocal value + if value == 'hello': + value = 'world' + return 'hello' + return value + + assert some_func(fresh=True) == 'hello' + assert some_func(raw=True) == b'hello' + assert some_func(fresh=True) == 'world' diff --git a/tests/test_orm.py b/tests/test_orm.py index a437675..036d0e1 100644 --- a/tests/test_orm.py +++ b/tests/test_orm.py @@ -1,6 +1,5 @@ import ast import json -import mock import unittest from contextlib import suppress from uuid import uuid4 @@ -8,7 +7,6 @@ from uuid import uuid4 import arrow from dq.database import commit_scope, save_to_database -from dq.orm import Cache from tests.models import Table2, User, UserType @@ -18,8 +16,6 @@ class TestORM(unittest.TestCase): with suppress(Exception), commit_scope() as session: session.query(Table2).delete() session.query(User).delete() - if Cache._instance: - Cache._instance.flushall() def test_string(self): t2 = Table2(id=1, key=1, key2=1, user_type=UserType.admin, @@ -34,56 +30,6 @@ class TestORM(unittest.TestCase): 'created_at': 1508544000, } - def test_cache_key(self): - assert Table2.cache_key('uuid', '123') == 'dqpy.cache.table2.uuid.123' - k2 = Table2.cache_key('id', 123, contains_deleted=True, - contains_empty=True) - assert k2 == 
'dqpy.cache.table2.id.123.del.empty' - - def test_cached(self): - t2 = Table2(id=999, user_uuid=str(uuid4()), key=1, key2=1, - user_type=UserType.regular) - save_to_database(t2) - - key = 'dqpy.cache.table2.id.999' - assert not Cache.get(key) - - t2 = Table2.get(999) - assert t2.key == 1 - assert Cache.get(key) - with commit_scope() as session: - session.query(Table2).delete() - t2 = Table2.get(999) - assert t2.key == 1 - - def test_cache_error(self): - Cache._instance = 123 - assert not Cache.get('cornell') - Cache.set('cornell', '#1', 123) - Cache._instance = None - - @mock.patch('dq.config.Config.get') - def test_cache_broken(self, mock_cfg): - mock_cfg.return_value = {'port': 1234} - Cache._instance = None - Cache._attempted = None - assert not Cache.instance() - assert Cache._attempted - assert not Cache.get('cornell') - Cache.set('cornell', '#1', 123) - Cache._attempted = None - - @mock.patch('dq.config.Config.get') - def test_cache_none(self, mock_cfg): - mock_cfg.return_value = None - Cache._instance = None - Cache._attempted = None - assert not Cache.instance() - assert Cache._attempted - assert not Cache.get('cornell') - Cache.set('cornell', '#1', 123) - Cache._attempted = None - def test_to_dict(self): uuid = str(uuid4()) now = arrow.get() @@ -146,7 +92,7 @@ class TestORM(unittest.TestCase): assert t2.id == 1 t2.key2 = 10 save_to_database(t2) - t2 = Table2.get_by('user_uuid', uuid, fresh=True) + t2 = Table2.get_by('user_uuid', uuid) assert t2.key2 == 10 def test_get_by_empty(self): diff --git a/tests/test_redis.py b/tests/test_redis.py index 472f1ed..d385f87 100644 --- a/tests/test_redis.py +++ b/tests/test_redis.py @@ -3,11 +3,16 @@ import unittest from time import sleep from uuid import uuid4 -from dq.redis import Redis +from dq.redis import init_redis, Redis class TestRedis(unittest.TestCase): + def test_init(self): + assert init_redis('redis') + assert not init_redis('none') + assert not init_redis('faulty_redis') + def test_exists(self): key = 
'dqtest-{}'.format(uuid4()) assert not Redis.exists(key) @@ -24,7 +29,7 @@ class TestRedis(unittest.TestCase): assert Redis.delete(key) assert not Redis.get(key) - def test_get_json(self): + def test_get_json_string(self): key = 'dqtest-{}'.format(uuid4()) assert not Redis.get(key) Redis.setex(key, [1, '2', True], 1) @@ -32,6 +37,14 @@ class TestRedis(unittest.TestCase): Redis.setex(key, {'cornell': '#1'}, 1) assert Redis.get(key) == '{"cornell": "#1"}' + def test_get_json(self): + key = 'dqtest-{}'.format(uuid4()) + assert not Redis.get(key) + Redis.setex(key, [1, '2', True], 1) + assert Redis.get_json(key) == [1, '2', True] + Redis.setex(key, {'cornell': '#1'}, 1) + assert Redis.get_json(key) == {'cornell': '#1'} + def test_expire(self): key = 'dqtest-{}'.format(uuid4()) assert Redis.set(key, '1')
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 6 }
1.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 alembic==1.15.2 arrow==0.12.1 astroid==3.3.9 asttokens==3.0.0 attrs==25.3.0 babel==2.17.0 backports.tarfile==1.2.0 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 coverage==7.8.0 coveralls==4.0.1 cryptography==44.0.2 cssselect==1.3.0 cssutils==2.11.1 decorator==5.2.1 dill==0.3.9 docopt==0.6.2 docutils==0.21.2 -e git+https://github.com/danqing/dqpy.git@e610cf42ae9685469d9417d475170722da133440#egg=dqpy emails==0.5.15 exceptiongroup==1.2.2 executing==2.2.0 fancycompleter==0.9.1 flake8==7.2.0 greenlet==3.1.1 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 ipython==8.18.1 isort==6.0.1 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jedi==0.19.2 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 lxml==5.3.1 Mako==1.3.9 markdown-it-py==3.0.0 MarkupSafe==3.0.2 matplotlib-inline==0.1.7 mccabe==0.7.0 mdurl==0.1.2 mock==5.2.0 more-itertools==10.6.0 nh3==0.2.21 packaging==24.2 parso==0.8.4 pdbpp==0.10.3 pexpect==4.9.0 platformdirs==4.3.7 pluggy==1.5.0 premailer==3.10.0 prompt_toolkit==3.0.50 ptyprocess==0.7.0 pure_eval==0.2.3 pycodestyle==2.13.0 pycparser==2.22 pyflakes==3.3.2 Pygments==2.19.1 pylint==3.3.6 PyMySQL==0.9.2 pyrepl==0.9.0 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 readme_renderer==44.0 redis==2.10.6 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 schematics==2.1.0 SecretStorage==3.3.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 SQLAlchemy==1.2.12 SQLAlchemy-Utils==0.33.5 stack-data==0.6.3 toml==0.9.6 tomli==2.2.1 tomlkit==0.13.2 traitlets==5.14.3 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 wcwidth==0.2.13 wmctrl==0.5 zipp==3.21.0
name: dqpy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - alembic==1.15.2 - arrow==0.12.1 - astroid==3.3.9 - asttokens==3.0.0 - attrs==25.3.0 - babel==2.17.0 - backports-tarfile==1.2.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - coverage==7.8.0 - coveralls==4.0.1 - cryptography==44.0.2 - cssselect==1.3.0 - cssutils==2.11.1 - decorator==5.2.1 - dill==0.3.9 - docopt==0.6.2 - docutils==0.21.2 - emails==0.5.15 - exceptiongroup==1.2.2 - executing==2.2.0 - fancycompleter==0.9.1 - flake8==7.2.0 - greenlet==3.1.1 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipython==8.18.1 - isort==6.0.1 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jedi==0.19.2 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - lxml==5.3.1 - mako==1.3.9 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - matplotlib-inline==0.1.7 - mccabe==0.7.0 - mdurl==0.1.2 - mock==5.2.0 - more-itertools==10.6.0 - nh3==0.2.21 - packaging==24.2 - parso==0.8.4 - pdbpp==0.10.3 - pexpect==4.9.0 - platformdirs==4.3.7 - pluggy==1.5.0 - premailer==3.10.0 - prompt-toolkit==3.0.50 - ptyprocess==0.7.0 - pure-eval==0.2.3 - pycodestyle==2.13.0 - pycparser==2.22 - pyflakes==3.3.2 - pygments==2.19.1 - pylint==3.3.6 - pymysql==0.9.2 - pyrepl==0.9.0 - 
pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - readme-renderer==44.0 - redis==2.10.6 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - schematics==2.1.0 - secretstorage==3.3.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - sqlalchemy==1.2.12 - sqlalchemy-utils==0.33.5 - stack-data==0.6.3 - toml==0.9.6 - tomli==2.2.1 - tomlkit==0.13.2 - traitlets==5.14.3 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - wcwidth==0.2.13 - wmctrl==0.5 - zipp==3.21.0 prefix: /opt/conda/envs/dqpy
[ "tests/test_cache.py::TestCache::test_cache_none", "tests/test_orm.py::TestORM::test_from_dict", "tests/test_orm.py::TestORM::test_string", "tests/test_orm.py::TestORM::test_to_dict", "tests/test_orm.py::TestORM::test_to_json" ]
[ "tests/test_cache.py::TestCache::test_cache_fresh", "tests/test_orm.py::TestORM::test_get_by", "tests/test_orm.py::TestORM::test_get_by_deleted", "tests/test_orm.py::TestORM::test_get_by_deleted_contains", "tests/test_orm.py::TestORM::test_get_by_empty", "tests/test_orm.py::TestORM::test_get_by_for_update", "tests/test_orm.py::TestORM::test_get_by_user", "tests/test_orm.py::TestORM::test_get_multi", "tests/test_redis.py::TestRedis::test_atomic_rw", "tests/test_redis.py::TestRedis::test_atomic_rw_error", "tests/test_redis.py::TestRedis::test_atomic_rw_hash", "tests/test_redis.py::TestRedis::test_atomic_rw_hash_error", "tests/test_redis.py::TestRedis::test_atomic_rw_hash_user_abort", "tests/test_redis.py::TestRedis::test_atomic_rw_user_abort", "tests/test_redis.py::TestRedis::test_exists", "tests/test_redis.py::TestRedis::test_expire", "tests/test_redis.py::TestRedis::test_get", "tests/test_redis.py::TestRedis::test_get_json", "tests/test_redis.py::TestRedis::test_get_json_string", "tests/test_redis.py::TestRedis::test_hash", "tests/test_redis.py::TestRedis::test_init", "tests/test_redis.py::TestRedis::test_list" ]
[]
[]
MIT License
2,767
[ "docs/conf.py", "setup.py", "dq/cache.py", "config/local.toml", "dq/orm.py", "dq/redis.py", "docs/index.rst" ]
[ "docs/conf.py", "setup.py", "dq/cache.py", "config/local.toml", "dq/orm.py", "dq/redis.py", "docs/index.rst" ]
streamlink__streamlink-1927
6bf654a291e2a792088384c7ba7c9dc9b2a14b1d
2018-07-12 11:27:40
42c34ca104f9a1761164dfce6c3ebabea984a823
diff --git a/.travis.yml b/.travis.yml index 0da9dd5e..d60852f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,9 +12,14 @@ matrix: - python: '3.5' env: BUILD_DOCS=yes BUILD_INSTALLER=yes BUILD_SDIST=yes DEPLOY_PYPI=yes - python: '3.6' - - python: '3.7-dev' + - python: '3.7' + dist: xenial + sudo: true + - python: '3.8-dev' + dist: xenial + sudo: true allow_failures: - - python: '3.7-dev' + - python: '3.8-dev' before_install: - pip install --disable-pip-version-check --upgrade pip setuptools diff --git a/appveyor.yml b/appveyor.yml index c40ae00f..1b26ac10 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -5,10 +5,12 @@ environment: - PYTHON: "C:\\Python34" - PYTHON: "C:\\Python35" - PYTHON: "C:\\Python36" + - PYTHON: "C:\\Python37" - PYTHON: "C:\\Python27-x64" - PYTHON: "C:\\Python34-x64" - PYTHON: "C:\\Python35-x64" - PYTHON: "C:\\Python36-x64" + - PYTHON: "C:\\Python37-x64" install: # If there is a newer build queued for the same PR, cancel this one. diff --git a/src/streamlink/stream/dash.py b/src/streamlink/stream/dash.py index e202e53b..1d1c5b65 100644 --- a/src/streamlink/stream/dash.py +++ b/src/streamlink/stream/dash.py @@ -11,6 +11,7 @@ from streamlink.stream.stream import Stream from streamlink.stream.dash_manifest import MPD, sleeper, sleep_until, utc, freeze_timeline from streamlink.stream.ffmpegmux import FFMPEGMuxer from streamlink.stream.segmented import SegmentedStreamReader, SegmentedStreamWorker, SegmentedStreamWriter +from streamlink.utils.l10n import Language log = logging.getLogger(__name__) @@ -196,6 +197,31 @@ class DASHStream(Stream): if not audio: audio = [None] + locale = session.localization + locale_lang = locale.language + lang = None + available_languages = set() + + # if the locale is explicitly set, prefer that language over others + for aud in audio: + if aud and aud.lang: + available_languages.add(aud.lang) + try: + if locale.explicit and aud.lang and Language.get(aud.lang) == locale_lang: + lang = aud.lang + except LookupError: + 
continue + + if not lang: + # filter by the first language that appears + lang = audio[0] and audio[0].lang + + log.debug("Available languages for DASH audio streams: {0} (using: {1})".format(", ".join(available_languages) or "NONE", lang or "n/a")) + + # if the language is given by the stream, filter out other languages that do not match + if len(available_languages) > 1: + audio = list(filter(lambda a: a.lang is None or a.lang == lang, audio)) + for vid, aud in itertools.product(video, audio): stream = DASHStream(session, mpd, vid, aud, **args) stream_name = [] diff --git a/src/streamlink/stream/hls.py b/src/streamlink/stream/hls.py index 1faba0ae..0fe12ad5 100644 --- a/src/streamlink/stream/hls.py +++ b/src/streamlink/stream/hls.py @@ -295,15 +295,18 @@ class MuxedHLSStream(MuxedStream): def __init__(self, session, video, audio, force_restart=False, ffmpeg_options=None, **args): tracks = [video] + maps = ["0:v"] if audio: if isinstance(audio, list): tracks.extend(audio) else: tracks.append(audio) + for i in range(1, len(tracks)): + maps.append("{0}:a".format(i)) substreams = map(lambda url: HLSStream(session, url, force_restart=force_restart, **args), tracks) ffmpeg_options = ffmpeg_options or {} - super(MuxedHLSStream, self).__init__(session, *substreams, format="mpegts", **ffmpeg_options) + super(MuxedHLSStream, self).__init__(session, *substreams, format="mpegts", maps=maps, **ffmpeg_options) class HLSStream(HTTPStream): diff --git a/src/streamlink/utils/l10n.py b/src/streamlink/utils/l10n.py index 0164c7cd..ef4eb8a1 100644 --- a/src/streamlink/utils/l10n.py +++ b/src/streamlink/utils/l10n.py @@ -1,4 +1,5 @@ import locale +import logging from streamlink.compat import is_py2 @@ -16,6 +17,8 @@ DEFAULT_LANGUAGE = "en" DEFAULT_COUNTRY = "US" DEFAULT_LANGUAGE_CODE = "{0}_{1}".format(DEFAULT_LANGUAGE, DEFAULT_COUNTRY) +log = logging.getLogger(__name__) + class Country(object): def __init__(self, alpha2, alpha3, numeric, name, official_name=None): @@ -147,6 +150,7 
@@ class Localization(object): self._language_code = DEFAULT_LANGUAGE_CODE else: raise + log.debug("Language code: {0}".format(self._language_code)) def equivalent(self, language=None, country=None): equivalent = True
Audio Spanish I don't hear this video in Spanish, why?, Is there any way I can keep it from being heard in English? ``` https://www.atresplayer.com/antena3/series/el-cuento-de-la-criada/temporada-1/capitulo-6-el-lugar-de-la-mujer_5b364bee7ed1a8dd360b5b7b/ ``` ### Logs ``` "C:\Program Files (x86)\Streamlink\bin\streamlink.exe" h ttps://www.atresplayer.com/antena3/series/el-cuento-de-la-criada/temporada-1/cap itulo-6-el-lugar-de-la-mujer_5b364bee7ed1a8dd360b5b7b/ best --player ffplay -l d ebug [cli][debug] OS: Windows 7 [cli][debug] Python: 3.5.2 [cli][debug] Streamlink: 0.14.2 [cli][debug] Requests(2.19.1), Socks(1.6.7), Websocket(0.48.0) [cli][info] Found matching plugin atresplayer for URL https://www.atresplayer.co m/antena3/series/el-cuento-de-la-criada/temporada-1/capitulo-6-el-lugar-de-la-mu jer_5b364bee7ed1a8dd360b5b7b/ [plugin.atresplayer][debug] API URL: https://api.atresplayer.com/client/v1/playe r/episode/5b364bee7ed1a8dd360b5b7b [plugin.atresplayer][debug] Stream source: https://geodeswowa3player.akamaized.n et/vcg/_definst_/assets4/2018/06/29/7875368D-BF45-4012-9EBC-C4F78984B672/hls.smi l/playlist.m3u8?pulse=assets4%2F2018%2F06%2F29%2F7875368D-BF45-4012-9EBC-C4F7898 4B672%2F%7C1531195580%7Cb6e79f7291966a25070a79138cd19cb9 (application/vnd.apple. 
mpegurl) [stream.hls][debug] Using external audio tracks for stream 1080p (language=es, n ame=Spanish) [stream.hls][debug] Using external audio tracks for stream 720p (language=es, na me=Spanish) [stream.hls][debug] Using external audio tracks for stream 480p (language=es, na me=Spanish) [stream.hls][debug] Using external audio tracks for stream 360p (language=es, na me=Spanish) [stream.hls][debug] Using external audio tracks for stream 240p (language=es, na me=Spanish) [plugin.atresplayer][debug] Stream source: https://geodeswowa3player.akamaized.n et/vcg/_definst_/assets4/2018/06/29/7875368D-BF45-4012-9EBC-C4F78984B672/dash.sm il/manifest_mvlist.mpd (application/dash+xml) [cli][info] Available streams: 240p (worst), 240p+a128k, 360p, 360p+a128k, 480p, 480p+a128k, 720p, 720p+a128k, 1080p, 1080p+a128k (best) [cli][info] Starting player: ffplay .................... .................... .................... ```
streamlink/streamlink
diff --git a/tests/streams/test_dash.py b/tests/streams/test_dash.py index 807b8a18..cf406185 100644 --- a/tests/streams/test_dash.py +++ b/tests/streams/test_dash.py @@ -1,7 +1,4 @@ import unittest -import unittest -from streamlink.stream.dash import DASHStreamWorker -from tests.mock import MagicMock, patch, ANY, Mock, call from streamlink import PluginError from streamlink.stream import * @@ -41,8 +38,8 @@ class TestDASHStream(unittest.TestCase): Mock(adaptationSets=[ Mock(contentProtection=None, representations=[ - Mock(id=1, mimeType="audio/mp4", bandwidth=128.0), - Mock(id=2, mimeType="audio/mp4", bandwidth=256.0) + Mock(id=1, mimeType="audio/mp4", bandwidth=128.0, lang='en'), + Mock(id=2, mimeType="audio/mp4", bandwidth=256.0, lang='en') ]) ]) ]) @@ -63,7 +60,7 @@ class TestDASHStream(unittest.TestCase): representations=[ Mock(id=1, mimeType="video/mp4", height=720), Mock(id=2, mimeType="video/mp4", height=1080), - Mock(id=3, mimeType="audio/aac", bandwidth=128.0) + Mock(id=3, mimeType="audio/aac", bandwidth=128.0, lang='en') ]) ]) ]) @@ -84,8 +81,8 @@ class TestDASHStream(unittest.TestCase): representations=[ Mock(id=1, mimeType="video/mp4", height=720), Mock(id=2, mimeType="video/mp4", height=1080), - Mock(id=3, mimeType="audio/aac", bandwidth=128.0), - Mock(id=4, mimeType="audio/aac", bandwidth=256.0) + Mock(id=3, mimeType="audio/aac", bandwidth=128.0, lang='en'), + Mock(id=4, mimeType="audio/aac", bandwidth=256.0, lang='en') ]) ]) ]) @@ -98,6 +95,108 @@ class TestDASHStream(unittest.TestCase): sorted(["720p+a128k", "1080p+a128k", "720p+a256k", "1080p+a256k"]) ) + @patch('streamlink.stream.dash.MPD') + def test_parse_manifest_audio_multi_lang(self, mpdClass): + mpd = mpdClass.return_value = Mock(periods=[ + Mock(adaptationSets=[ + Mock(contentProtection=None, + representations=[ + Mock(id=1, mimeType="video/mp4", height=720), + Mock(id=2, mimeType="video/mp4", height=1080), + Mock(id=3, mimeType="audio/aac", bandwidth=128.0, lang='en'), + Mock(id=4, 
mimeType="audio/aac", bandwidth=128.0, lang='es') + ]) + ]) + ]) + + streams = DASHStream.parse_manifest(self.session, self.test_url) + mpdClass.assert_called_with(ANY, base_url="http://test.bar", url="http://test.bar/foo.mpd") + + self.assertSequenceEqual( + sorted(list(streams.keys())), + sorted(["720p", "1080p"]) + ) + + self.assertEqual(streams["720p"].audio_representation.lang, "en") + self.assertEqual(streams["1080p"].audio_representation.lang, "en") + + @patch('streamlink.stream.dash.MPD') + def test_parse_manifest_audio_multi_lang_alpha3(self, mpdClass): + mpd = mpdClass.return_value = Mock(periods=[ + Mock(adaptationSets=[ + Mock(contentProtection=None, + representations=[ + Mock(id=1, mimeType="video/mp4", height=720), + Mock(id=2, mimeType="video/mp4", height=1080), + Mock(id=3, mimeType="audio/aac", bandwidth=128.0, lang='eng'), + Mock(id=4, mimeType="audio/aac", bandwidth=128.0, lang='spa') + ]) + ]) + ]) + + streams = DASHStream.parse_manifest(self.session, self.test_url) + mpdClass.assert_called_with(ANY, base_url="http://test.bar", url="http://test.bar/foo.mpd") + + self.assertSequenceEqual( + sorted(list(streams.keys())), + sorted(["720p", "1080p"]) + ) + + self.assertEqual(streams["720p"].audio_representation.lang, "eng") + self.assertEqual(streams["1080p"].audio_representation.lang, "eng") + + @patch('streamlink.stream.dash.MPD') + def test_parse_manifest_audio_invalid_lang(self, mpdClass): + mpd = mpdClass.return_value = Mock(periods=[ + Mock(adaptationSets=[ + Mock(contentProtection=None, + representations=[ + Mock(id=1, mimeType="video/mp4", height=720), + Mock(id=2, mimeType="video/mp4", height=1080), + Mock(id=3, mimeType="audio/aac", bandwidth=128.0, lang='en_no_voice'), + ]) + ]) + ]) + + streams = DASHStream.parse_manifest(self.session, self.test_url) + mpdClass.assert_called_with(ANY, base_url="http://test.bar", url="http://test.bar/foo.mpd") + + self.assertSequenceEqual( + sorted(list(streams.keys())), + sorted(["720p", "1080p"]) + ) + 
+ self.assertEqual(streams["720p"].audio_representation.lang, "en_no_voice") + self.assertEqual(streams["1080p"].audio_representation.lang, "en_no_voice") + + @patch('streamlink.stream.dash.MPD') + def test_parse_manifest_audio_multi_lang_locale(self, mpdClass): + self.session.localization.language.alpha2 = "es" + self.session.localization.explicit = True + + mpd = mpdClass.return_value = Mock(periods=[ + Mock(adaptationSets=[ + Mock(contentProtection=None, + representations=[ + Mock(id=1, mimeType="video/mp4", height=720), + Mock(id=2, mimeType="video/mp4", height=1080), + Mock(id=3, mimeType="audio/aac", bandwidth=128.0, lang='en'), + Mock(id=4, mimeType="audio/aac", bandwidth=128.0, lang='es') + ]) + ]) + ]) + + streams = DASHStream.parse_manifest(self.session, self.test_url) + mpdClass.assert_called_with(ANY, base_url="http://test.bar", url="http://test.bar/foo.mpd") + + self.assertSequenceEqual( + sorted(list(streams.keys())), + sorted(["720p", "1080p"]) + ) + + self.assertEqual(streams["720p"].audio_representation.lang, "es") + self.assertEqual(streams["1080p"].audio_representation.lang, "es") + @patch('streamlink.stream.dash.MPD') def test_parse_manifest_drm(self, mpdClass): mpd = mpdClass.return_value = Mock(periods=[Mock(adaptationSets=[Mock(contentProtection="DRM")])]) @@ -122,7 +221,7 @@ class TestDASHStream(unittest.TestCase): @patch('streamlink.stream.dash.DASHStreamReader') @patch('streamlink.stream.dash.FFMPEGMuxer') def test_stream_open_video_audio(self, muxer, reader): - stream = DASHStream(self.session, Mock(), Mock(id=1, mimeType="video/mp4"), Mock(id=2, mimeType="audio/mp3")) + stream = DASHStream(self.session, Mock(), Mock(id=1, mimeType="video/mp4"), Mock(id=2, mimeType="audio/mp3", lang='en')) open_reader = reader.return_value = Mock() stream.open() @@ -202,7 +301,7 @@ class TestDASHStreamWorker(unittest.TestCase): @patch("streamlink.stream.dash_manifest.time.sleep") def test_duplicate_rep_id(self, sleep): representation_vid = Mock(id=1, 
mimeType="video/mp4", height=720) - representation_aud = Mock(id=1, mimeType="audio/aac") + representation_aud = Mock(id=1, mimeType="audio/aac", lang='en') mpd = Mock(dynamic=False, publishTime=1,
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 1 }, "num_modified_files": 5 }
0.14
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "codecov", "coverage", "mock", "requests-mock", "pynsist", "freezegun", "unittest2" ], "pre_install": [ "apt-get update", "apt-get install -y gcc python3-dev" ], "python": "3.6", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 distlib==0.3.9 freezegun==1.2.2 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 iso-639==0.4.5 iso3166==2.1.1 isodate==0.6.1 Jinja2==3.0.3 linecache2==1.0.0 MarkupSafe==2.0.1 mock==5.2.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pycryptodome==3.21.0 pynsist==2.8 pyparsing==3.1.4 PySocks==1.7.1 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 requests==2.27.1 requests-mock==1.12.1 requests_download==0.1.2 six==1.17.0 -e git+https://github.com/streamlink/streamlink.git@6bf654a291e2a792088384c7ba7c9dc9b2a14b1d#egg=streamlink tomli==1.2.3 traceback2==1.4.0 typing_extensions==4.1.1 unittest2==1.1.0 urllib3==1.26.20 websocket-client==1.3.1 yarg==0.1.10 zipp==3.6.0
name: streamlink channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argparse==1.4.0 - attrs==22.2.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - distlib==0.3.9 - freezegun==1.2.2 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - iso-639==0.4.5 - iso3166==2.1.1 - isodate==0.6.1 - jinja2==3.0.3 - linecache2==1.0.0 - markupsafe==2.0.1 - mock==5.2.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycryptodome==3.21.0 - pynsist==2.8 - pyparsing==3.1.4 - pysocks==1.7.1 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - requests==2.27.1 - requests-download==0.1.2 - requests-mock==1.12.1 - six==1.17.0 - tomli==1.2.3 - traceback2==1.4.0 - typing-extensions==4.1.1 - unittest2==1.1.0 - urllib3==1.26.20 - websocket-client==1.3.1 - yarg==0.1.10 - zipp==3.6.0 prefix: /opt/conda/envs/streamlink
[ "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_audio_multi_lang", "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_audio_multi_lang_alpha3", "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_audio_multi_lang_locale" ]
[]
[ "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_audio_invalid_lang", "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_audio_multi", "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_audio_only", "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_audio_single", "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_drm", "tests/streams/test_dash.py::TestDASHStream::test_parse_manifest_video_only", "tests/streams/test_dash.py::TestDASHStream::test_stream_open_video_audio", "tests/streams/test_dash.py::TestDASHStream::test_stream_open_video_only", "tests/streams/test_dash.py::TestDASHStreamWorker::test_duplicate_rep_id", "tests/streams/test_dash.py::TestDASHStreamWorker::test_dynamic_reload", "tests/streams/test_dash.py::TestDASHStreamWorker::test_static" ]
[]
BSD 2-Clause "Simplified" License
2,768
[ "src/streamlink/stream/hls.py", ".travis.yml", "appveyor.yml", "src/streamlink/utils/l10n.py", "src/streamlink/stream/dash.py" ]
[ "src/streamlink/stream/hls.py", ".travis.yml", "appveyor.yml", "src/streamlink/utils/l10n.py", "src/streamlink/stream/dash.py" ]
AnalogJ__lexicon-264
59a1372a2ba31204f77a8383d0880ba62e0e6607
2018-07-12 11:45:27
59a1372a2ba31204f77a8383d0880ba62e0e6607
diff --git a/lexicon/__main__.py b/lexicon/__main__.py index d674809e..ad243f18 100644 --- a/lexicon/__main__.py +++ b/lexicon/__main__.py @@ -7,6 +7,7 @@ import importlib import logging import os import sys +import json import pkg_resources @@ -19,16 +20,19 @@ logger = logging.getLogger(__name__) def BaseProviderParser(): parser = argparse.ArgumentParser(add_help=False) - parser.add_argument("action", help="specify the action to take", default='list', choices=['create', 'list', 'update', 'delete']) - parser.add_argument("domain", help="specify the domain, supports subdomains as well") - parser.add_argument("type", help="specify the entry type", default='TXT', choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC']) - - parser.add_argument("--name", help="specify the record name") - parser.add_argument("--content", help="specify the record content") - parser.add_argument("--ttl", type=int, help="specify the record time-to-live") - parser.add_argument("--priority", help="specify the record priority") - parser.add_argument("--identifier", help="specify the record for update or delete actions") - parser.add_argument("--log_level", help="specify the log level", default="DEBUG", choices=["CRITICAL","ERROR","WARNING","INFO","DEBUG","NOTSET"]) + parser.add_argument('action', help='specify the action to take', default='list', choices=['create', 'list', 'update', 'delete']) + parser.add_argument('domain', help='specify the domain, supports subdomains as well') + parser.add_argument('type', help='specify the entry type', default='TXT', choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC']) + + parser.add_argument('--name', help='specify the record name') + parser.add_argument('--content', help='specify the record content') + parser.add_argument('--ttl', type=int, help='specify the record time-to-live') + parser.add_argument('--priority', help='specify the record priority') + parser.add_argument('--identifier', help='specify the record for 
update or delete actions') + parser.add_argument('--log_level', help='specify the log level', default='ERROR', choices=['CRITICAL','ERROR','WARNING','INFO','DEBUG','NOTSET']) + parser.add_argument('--output', + help='specify the type of output: by default a formatted table (TABLE), a formatted table without header (TABLE-NO-HEADER), a JSON string (JSON) or no output (QUIET)', + default='TABLE', choices=['TABLE', 'TABLE-NO-HEADER', 'JSON', 'QUIET']) return parser def MainParser(): @@ -43,11 +47,11 @@ def MainParser(): parser = argparse.ArgumentParser(description='Create, Update, Delete, List DNS entries') try: - version = pkg_resources.get_distribution("dns-lexicon").version + version = pkg_resources.get_distribution('dns-lexicon').version except pkg_resources.DistributionNotFound: version = 'unknown' - parser.add_argument('--version', help="show the current version of lexicon", action='version', version='%(prog)s {0}'.format(version)) - parser.add_argument('--delegated', help="specify the delegated domain") + parser.add_argument('--version', help='show the current version of lexicon', action='version', version='%(prog)s {0}'.format(version)) + parser.add_argument('--delegated', help='specify the delegated domain') subparsers = parser.add_subparsers(dest='provider_name', help='specify the DNS provider to use') subparsers.required = True @@ -60,17 +64,73 @@ def MainParser(): return parser -#dynamically determine all the providers available. 
+# Convert returned JSON into a nice table for command line usage +def generate_table_result(logger, output=None, without_header=None): + try: + _ = (entry for entry in output) + except: + logger.debug('Command output is not iterable, and then cannot be printed with --quiet parameter not enabled.') + return None + + array = [[row['id'], row['type'], row['name'], row['content'], row['ttl']] for row in output] + + # Insert header (insert before calculating the max width of each column to take headers size into account) + if not without_header: + headers = ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL'] + array.insert(0, headers) + + columnWidths = [0, 0, 0, 0, 0] + # Find max width for each column + for row in array: + for idx, col in enumerate(row): + width = len(str(col)) + if width > columnWidths[idx]: + columnWidths[idx] = width + + # Add a 'nice' separator + if not without_header: + array.insert(1, ['-' * columnWidths[idx] for idx in range(len(columnWidths))]) + + # Construct table to be printed + table = [] + for row in array: + rowList = [] + for idx, col in enumerate(row): + rowList.append(str(col).ljust(columnWidths[idx])) + table.append(' '.join(rowList)) + + # Return table + return '\n'.join(table) + +# Print the relevant output for given output_type +def handle_output(results, output_type): + if not output_type == 'QUIET': + if not output_type == 'JSON': + table = generate_table_result(logger, results, output_type == 'TABLE-NO-HEADER') + if table: + print(table) + else: + try: + _ = (entry for entry in results) + json_str = json.dumps(results) + if json_str: + print(json_str) + except: + logger.debug('Output is not a JSON, and then cannot be printed with --output=JSON parameter.') + pass + +# Dynamically determine all the providers available. 
def main(): - parsed_args = MainParser().parse_args() log_level = logging.getLevelName(parsed_args.log_level) logging.basicConfig(stream=sys.stdout, level=log_level, format='%(message)s') logger.debug('Arguments: %s', parsed_args) - client = Client(parsed_args.__dict__) - client.execute() + client = Client(vars(parsed_args)) + + results = client.execute() + handle_output(results, parsed_args.output) if __name__ == '__main__': main()
[CLI] Pretty output for list method Is there any plans to have pretty outputs (table or at least formatted) for the ```list``` operation on the CLI? Right now, the CLI assumes a verbosity of DEBUG level, and outputs the Python representation of the result (managed by the provider). If --log_level=ERROR is used, no output is generated, which defeats the CLI usage, in my opinion. Is this behavior expected? Would you be open to a PR for that?
AnalogJ/lexicon
diff --git a/tests/test_client.py b/tests/test_client.py index 68f5b1f7..d41fc1e7 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,6 +1,7 @@ import lexicon.client import pytest import os + def test_Client_init(): options = { 'provider_name':'base', diff --git a/tests/test_output.py b/tests/test_output.py new file mode 100644 index 00000000..fdfae206 --- /dev/null +++ b/tests/test_output.py @@ -0,0 +1,67 @@ +from __future__ import absolute_import + +import sys +import importlib +import logging +import json +import lexicon.__main__ + +from types import ModuleType +from lexicon.providers.base import Provider as BaseProvider + +logger = logging.getLogger(__name__) +data = [ + {'id': 'fake-id', 'type': 'TXT', 'name': 'fake.example.com', 'content': 'fake', 'ttl': 3600}, + {'id': 'fake2-id', 'type': 'TXT', 'name': 'fake2.example.com', 'content': 'fake2', 'ttl': 3600} +] + +# Ensure that stdout corresponds to the given reference output +def assert_correct_output(capsys, expected_output_lines): + out, _ = capsys.readouterr() + assert out.splitlines() == expected_output_lines + +def test_output_function_outputs_json_as_table(capsys): + expected_output_lines = [ + 'ID TYPE NAME CONTENT TTL ', + '-------- ---- ----------------- ------- ----', + 'fake-id TXT fake.example.com fake 3600', + 'fake2-id TXT fake2.example.com fake2 3600', + ] + + lexicon.__main__.handle_output(data, 'TABLE') + assert_correct_output(capsys, expected_output_lines) + +def test_output_function_outputs_json_as_table_with_no_header(capsys): + expected_output_lines = [ + 'fake-id TXT fake.example.com fake 3600', + 'fake2-id TXT fake2.example.com fake2 3600', + ] + + lexicon.__main__.handle_output(data, 'TABLE-NO-HEADER') + assert_correct_output(capsys, expected_output_lines) + +def test_output_function_outputs_json_as_json_string(capsys): + lexicon.__main__.handle_output(data, 'JSON') + + out, _ = capsys.readouterr() + json_data = json.loads(out) + + assert json_data == data + +def 
test_output_function_output_nothing_when_quiet(capsys): + expected_output_lines = [] + + lexicon.__main__.handle_output(data, 'QUIET') + assert_correct_output(capsys, expected_output_lines) + +def test_output_function_outputs_nothing_with_not_a_json_data(capsys): + expected_output_lines = [] + + lexicon.__main__.handle_output(True, 'TABLE') + assert_correct_output(capsys, expected_output_lines) + + lexicon.__main__.handle_output(True, 'TABLE-NO-HEADER') + assert_correct_output(capsys, expected_output_lines) + + lexicon.__main__.handle_output(True, 'JSON') + assert_correct_output(capsys, expected_output_lines) \ No newline at end of file diff --git a/tests/test_main.py b/tests/test_parser.py similarity index 94% rename from tests/test_main.py rename to tests/test_parser.py index bbfd0d3d..20cd7cac 100644 --- a/tests/test_main.py +++ b/tests/test_parser.py @@ -8,7 +8,7 @@ def test_BaseProviderParser(): assert parsed.domain == 'capsulecd.com' assert parsed.type == 'TXT' assert parsed.ttl == None - + assert parsed.output == 'TABLE' def test_BaseProviderParser_without_domain(): baseparser = lexicon.__main__.BaseProviderParser() @@ -27,6 +27,7 @@ def test_MainParser(): assert parsed.action == 'list' assert parsed.domain == 'capsulecd.com' assert parsed.type == 'TXT' + assert parsed.output == 'TABLE' def test_MainParser_without_args(): baseparser = lexicon.__main__.MainParser()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
2.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "python-coveralls" ], "pre_install": [ "apt-get update", "apt-get install -y gcc cron rsyslog" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 coverage==7.2.7 -e git+https://github.com/AnalogJ/lexicon.git@59a1372a2ba31204f77a8383d0880ba62e0e6607#egg=dns_lexicon exceptiongroup==1.2.2 filelock==3.12.2 future==1.0.0 idna==3.10 importlib-metadata==6.7.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 pytest-cov==4.1.0 python-coveralls==2.9.3 PyYAML==6.0.1 requests==2.31.0 requests-file==2.1.0 six==1.17.0 tldextract==4.0.0 tomli==2.0.1 typing_extensions==4.7.1 urllib3==2.0.7 zipp==3.15.0
name: lexicon channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==3.4.1 - coverage==7.2.7 - exceptiongroup==1.2.2 - filelock==3.12.2 - future==1.0.0 - idna==3.10 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-cov==4.1.0 - python-coveralls==2.9.3 - pyyaml==6.0.1 - requests==2.31.0 - requests-file==2.1.0 - six==1.17.0 - tldextract==4.0.0 - tomli==2.0.1 - typing-extensions==4.7.1 - urllib3==2.0.7 - zipp==3.15.0 prefix: /opt/conda/envs/lexicon
[ "tests/test_output.py::test_output_function_outputs_json_as_table", "tests/test_output.py::test_output_function_outputs_json_as_table_with_no_header", "tests/test_output.py::test_output_function_outputs_json_as_json_string", "tests/test_output.py::test_output_function_output_nothing_when_quiet", "tests/test_output.py::test_output_function_outputs_nothing_with_not_a_json_data", "tests/test_parser.py::test_BaseProviderParser", "tests/test_parser.py::test_MainParser" ]
[]
[ "tests/test_client.py::test_Client_init", "tests/test_client.py::test_Client_init_when_domain_includes_subdomain_should_strip", "tests/test_client.py::test_Client_init_with_delegated_domain_name", "tests/test_client.py::test_Client_init_with_delegated_domain_fqdn", "tests/test_client.py::test_Client_init_with_same_delegated_domain_fqdn", "tests/test_client.py::test_Client_init_when_missing_provider_should_fail", "tests/test_client.py::test_Client_init_when_missing_action_should_fail", "tests/test_client.py::test_Client_init_when_missing_domain_should_fail", "tests/test_client.py::test_Client_init_when_missing_type_should_fail", "tests/test_client.py::test_Client_parse_env_with_no_keys_should_do_nothing", "tests/test_client.py::test_Client_parse_env_with_auth_keys", "tests/test_parser.py::test_BaseProviderParser_without_domain", "tests/test_parser.py::test_BaseProviderParser_without_options", "tests/test_parser.py::test_MainParser_without_args" ]
[]
MIT License
2,769
[ "lexicon/__main__.py" ]
[ "lexicon/__main__.py" ]
wright-group__WrightTools-665
32e4571ed7acb3c1b7588d5857785d9d91d3bd18
2018-07-12 21:12:08
6e0c301b1f703527709a2669bbde785255254239
pep8speaks: Hello @darienmorrow! Thanks for submitting the PR. - In the file [`WrightTools/kit/_calculate.py`](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py), following are the PEP8 issues : > [Line 21:67](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L21): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace > [Line 24:1](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L24): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace > [Line 44:61](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L44): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace > [Line 45:73](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L45): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace > [Line 51:1](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L51): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace > [Line 56:40](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L56): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 60:34](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L60): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 61:17](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L61): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline 
comment > [Line 62:22](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L62): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 64:58](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L64): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 65:30](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L65): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 66:30](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L66): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 67:25](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L67): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 69:78](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L69): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 70:24](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L70): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 71:27](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L71): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment > [Line 76:1](https://github.com/wright-group/WrightTools/blob/4e15e503274465ef90aaa870da00ab2f112e4de1/WrightTools/kit/_calculate.py#L76): 
[W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace ksunden: we may wish to look at `unyt` to simplify the computation here, perhaps. I'm considering replacing the custom code in the `units` package with that library, which has some potential positive side effects ksunden: Also, the pep8 warning still mentioned above are all in the docstring (which is why `black` didn't catch it) I believe `autopep8` will catch that if you don't feel like tracking those down manually ksunden: https://github.com/ambv/black/issues/318 (`black` will NOT handle this case, as there exist cases [perhaps poorly reasoned, but still] where doing so will affect correctness of the program, and `black` commits to not making any such changes) darienmorrow: @ksunden The reason I am not checking the value is because that requires me to be 100% sure about the correct value of the calculation. I am waiting for someone to check my math before I do that. darienmorrow: @ksunden The reason I do not check for the value in my test is because checking for the value requires me to be absolutely sure about the result of my calculation. I was hoping for someone to check my math before I specify values. darienmorrow: @ksunden I now check the value in the test. ksunden: Im holding off rereviewing until the math itself is approved, I would like to check thebither two outputs as well in the test, please.
diff --git a/WrightTools/data/_data.py b/WrightTools/data/_data.py index a48cf49..1124658 100644 --- a/WrightTools/data/_data.py +++ b/WrightTools/data/_data.py @@ -625,7 +625,7 @@ class Data(Group): New data object with the downscaled channels and axes """ if name is None: - name = self.natural_name + '_downscaled' + name = self.natural_name + "_downscaled" if parent is None: newdata = Data(name=name) else: @@ -633,13 +633,13 @@ class Data(Group): for channel in self.channels: name = channel.natural_name - newdata.create_channel(name=name, - values=downscale_local_mean(channel[:], tup), - units=channel.units) + newdata.create_channel( + name=name, values=downscale_local_mean(channel[:], tup), units=channel.units + ) args = [] for i, axis in enumerate(self.axes): if len(axis.variables) > 1: - raise NotImplementedError('downscale only works with simple axes currently') + raise NotImplementedError("downscale only works with simple axes currently") variable = axis.variables[0] name = variable.natural_name args.append(name) diff --git a/WrightTools/kit/_calculate.py b/WrightTools/kit/_calculate.py index 3af381c..16ef7d2 100644 --- a/WrightTools/kit/_calculate.py +++ b/WrightTools/kit/_calculate.py @@ -1,7 +1,7 @@ """Calculate.""" -# --- import -------------------------------------------------------------------------------------- +# --- import ------------------------------------------------------------- import numpy as np @@ -9,13 +9,78 @@ import numpy as np from .. 
import units as wt_units -# --- define -------------------------------------------------------------------------------------- +# --- define ------------------------------------------------------------- -__all__ = ["mono_resolution", "nm_width", "symmetric_sqrt"] +__all__ = ["fluence", "mono_resolution", "nm_width", "symmetric_sqrt"] -# --- functions ----------------------------------------------------------------------------------- +# --- functions ---------------------------------------------------------- + + +def fluence( + power_mW, + color, + beam_radius, + reprate_Hz, + pulse_width, + color_units="wn", + beam_radius_units="mm", + pulse_width_units="fs_t", + area_type="even", +): + """Calculate the fluence of a beam. + + Parameters + ---------- + power_mW : number + Time integrated power of beam. + color : number + Color of beam in units. + beam_radius : number + Radius of beam in units. + reprate_Hz : number + Laser repetition rate in inverse seconds (Hz). + pulse_width : number + Pulsewidth of laser in units + color_units : string (optional) + Valid wt.units color unit identifier. Default is wn. + beam_radius_units : string (optional) + Valid wt.units distance unit identifier. Default is mm. + pulse_width_units : number + Valid wt.units time unit identifier. Default is fs. + area_type : string (optional) + Type of calculation to accomplish for Gaussian area. + Currently nothing other than the default of even is implemented. 
+ + Returns + ------- + tuple + Fluence in uj/cm2, photons/cm2, and peak intensity in GW/cm2 + + """ + # calculate beam area + if area_type == "even": + radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm") + area_cm2 = np.pi * radius_cm ** 2 # cm^2 + else: + raise NotImplementedError + # calculate fluence in uj/cm^2 + ujcm2 = power_mW / reprate_Hz # mJ + ujcm2 *= 1e3 # uJ + ujcm2 /= area_cm2 # uJ/cm^2 + # calculate fluence in photons/cm^2 + energy = wt_units.converter(color, color_units, "eV") # eV + photonscm2 = ujcm2 * 1e-6 # J/cm2 + photonscm2 /= 1.60218e-19 # eV/cm2 + photonscm2 /= energy # photons/cm2 + # calculate peak intensity in GW/cm^2 + pulse_width_s = wt_units.converter(pulse_width, pulse_width_units, "s_t") # seconds + GWcm2 = ujcm2 / 1e6 # J/cm2 + GWcm2 /= pulse_width_s # W/cm2 + GWcm2 /= 1e9 + # finish + return ujcm2, photonscm2, GWcm2 def mono_resolution(grooves_per_mm, slit_width, focal_length, output_color, output_units="wn"):
Fluence calculation I oftentimes find myself needing to calculate a fluence from a power reading. I then go through ten minutes of unit conversions to do the calculation. I think we should make a function in `kit._calculate.py` that does this calculation. For instance: ``` def fluence(power_mW, reprate_Hz, photon_energy_eV, beam_radius_mm): return uj/cm2, photons/cm2 ``` Any thoughts?
wright-group/WrightTools
diff --git a/tests/kit/fluence.py b/tests/kit/fluence.py new file mode 100644 index 0000000..5355d64 --- /dev/null +++ b/tests/kit/fluence.py @@ -0,0 +1,20 @@ +"""Test fluence.""" + + +# --- import ------------------------------------------------------------- + + +import numpy as np + +import WrightTools as wt + + +# --- test --------------------------------------------------------------- + + +def test_0(): + out = wt.kit.fluence(1, 2, .1, 1000, 1, "eV", "cm", "ps_t") + checks = (31.83098, 99336493460095.2, 0.03183098) + assert np.isclose(checks[0], out[0], rtol=1e-3) + assert np.isclose(checks[1], out[1], rtol=1e-3) + assert np.isclose(checks[2], out[2], rtol=1e-3)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 2 }
3.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y libfreetype6-dev libopenblas-dev" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
black==25.1.0 cfgv==3.4.0 click==8.1.8 contourpy==1.3.0 coverage==7.8.0 cycler==0.12.1 distlib==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 fonttools==4.56.0 h5py==3.13.0 identify==2.6.9 imageio==2.37.0 importlib_resources==6.5.2 iniconfig==2.1.0 kiwisolver==1.4.7 lazy_loader==0.4 matplotlib==3.9.4 mypy-extensions==1.0.0 networkx==3.2.1 nodeenv==1.9.1 numexpr==2.10.2 numpy==2.0.2 packaging==24.2 pathspec==0.12.1 pillow==11.1.0 platformdirs==4.3.7 pluggy==1.5.0 pre_commit==4.2.0 pydocstyle==6.3.0 pyparsing==3.2.3 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 scikit-image==0.24.0 scipy==1.13.1 six==1.17.0 snowballstemmer==2.2.0 swebench_matterhorn @ file:///swebench_matterhorn tidy_headers==1.0.4 tifffile==2024.8.30 tomli==2.2.1 typing_extensions==4.13.0 virtualenv==20.29.3 -e git+https://github.com/wright-group/WrightTools.git@32e4571ed7acb3c1b7588d5857785d9d91d3bd18#egg=WrightTools zipp==3.21.0
name: WrightTools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - black==25.1.0 - cfgv==3.4.0 - click==8.1.8 - contourpy==1.3.0 - coverage==7.8.0 - cycler==0.12.1 - distlib==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - fonttools==4.56.0 - h5py==3.13.0 - identify==2.6.9 - imageio==2.37.0 - importlib-resources==6.5.2 - iniconfig==2.1.0 - kiwisolver==1.4.7 - lazy-loader==0.4 - matplotlib==3.9.4 - mypy-extensions==1.0.0 - networkx==3.2.1 - nodeenv==1.9.1 - numexpr==2.10.2 - numpy==2.0.2 - packaging==24.2 - pathspec==0.12.1 - pillow==11.1.0 - platformdirs==4.3.7 - pluggy==1.5.0 - pre-commit==4.2.0 - pydocstyle==6.3.0 - pyparsing==3.2.3 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - scikit-image==0.24.0 - scipy==1.13.1 - six==1.17.0 - snowballstemmer==2.2.0 - swebench-matterhorn==0.0.0 - tidy-headers==1.0.4 - tifffile==2024.8.30 - tomli==2.2.1 - typing-extensions==4.13.0 - virtualenv==20.29.3 - zipp==3.21.0 prefix: /opt/conda/envs/WrightTools
[ "tests/kit/fluence.py::test_0" ]
[]
[]
[]
MIT License
2,770
[ "WrightTools/kit/_calculate.py", "WrightTools/data/_data.py" ]
[ "WrightTools/kit/_calculate.py", "WrightTools/data/_data.py" ]
hgrecco__pint-652
0c4a9503b9fe00a62a90c33cb49cb047784766d3
2018-07-13 07:23:40
bc754ae302b0c03d1802daddcd76c103a5fdfb67
diff --git a/docs/wrapping.rst b/docs/wrapping.rst index a4913b3..dd9a39f 100644 --- a/docs/wrapping.rst +++ b/docs/wrapping.rst @@ -148,6 +148,37 @@ airmass:: def solar_position(lat, lon, press, tamb, timestamp): return zenith, azimuth, airmass +Optional arguments +------------------ + +For a function with named keywords with optional values, use a tuple for all +arguments: + +.. doctest:: + + >>> @ureg.wraps(ureg.second, (ureg.meters, ureg.meters/ureg.second**2)) + ... def calculate_time_to_fall(height, gravity=Q_(9.8, 'm/s^2'), verbose=False): + ... """Calculate time to fall from a height h. + ... + ... By default, the gravity is assumed to be earth gravity, + ... but it can be modified. + ... + ... d = .5 * g * t**2 + ... t = sqrt(2 * d / g) + ... """ + ... t = sqrt(2 * height / gravity) + ... if verbose: print(str(t) + " seconds to fall") + ... return t + ... + >>> lunar_module_height = Q_(22, 'feet') + Q_(11, 'inches') + >>> calculate_time_to_fall(lunar_module_height, verbose=True) + 1.1939473204801092 seconds to fall + <Quantity(1.1939473204801092, 'second')> + >>> + >>> moon_gravity = Q_(1.625, 'm/s^2') + >>> tcalculate_time_to_fall(lunar_module_height, moon_gravity) + <Quantity(2.932051001760214, 'second')> + Specifying relations between arguments -------------------------------------- @@ -171,6 +202,19 @@ You can use more than one label: ... def some_function(x, y, z): ... pass +With optional arguments + +.. doctest:: + + >>> @ureg.wraps('=A*B', ('=A', '=B')) + ... def get_displacement(time, rate=Q_(1, 'm/s')): + ... return time * rate + ... 
+ >>> get_displacement(Q_(2, 's')) + <Quantity(2, 'meter')> + >>> get_displacement(Q_(2, 's'), Q_(1, 'deg/s')) + <Quantity(2, 'degree')> + Ignoring an argument or return value ------------------------------------ diff --git a/pint/registry_helpers.py b/pint/registry_helpers.py index 1105038..4a8b862 100644 --- a/pint/registry_helpers.py +++ b/pint/registry_helpers.py @@ -15,6 +15,12 @@ from .compat import string_types, zip_longest from .errors import DimensionalityError from .util import to_units_container, UnitsContainer +try: + from inspect import signature +except ImportError: + # Python2 does not have the inspect library. Import the backport. + from funcsigs import signature + def _replace_units(original_units, values_by_name): """Convert a unit compatible type to a UnitsContainer. @@ -165,6 +171,19 @@ def wraps(ureg, ret, args, strict=True): @functools.wraps(func, assigned=assigned, updated=updated) def wrapper(*values, **kw): + + # Named keywords may have been left blank. Wherever the named keyword is blank, + # fill it in with the default value. + sig = signature(func) + bound_arguments = sig.bind(*values, **kw) + + for param in sig.parameters.values(): + if param.name not in bound_arguments.arguments: + bound_arguments.arguments[param.name] = param.default + + values = [bound_arguments.arguments[key] for key in sig.parameters.keys()] + kw = {} + # In principle, the values are used as is # When then extract the magnitudes when needed. new_values, values_by_name = converter(ureg, values, strict) diff --git a/setup.py b/setup.py index 354a79d..1bcae3c 100644 --- a/setup.py +++ b/setup.py @@ -60,4 +60,10 @@ setup( 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', - ]) + ], + extras_require={ + ':python_version == "2.7"': [ + 'funcsigs', + ], + }, + )
Wraps crashes when using the same unit for different input values When I have this function: ``` @ureg.wraps(ureg.M, (ureg.degC, ureg.M, ureg.M)) def hco3_eq_from_tillmans_phreeqc(temperature, CO2, NaCl_conc=__NaCl_conc): pass ``` And call it as: ``` temp = ureg.Quantity(12, ureg.degC) print(mdl.hco3_eq_from_tillmans_phreeqc(temp, 1*ureg.mM, NaCl_conc = 0.1 * ureg.mM)) ``` Note how mM=mmol/L, a unit I added to my registry. It gives me the following error: ``` Traceback (most recent call last): File "C:\Users\korevma\.vscode\extensions\ms-python.python-2018.2.1\pythonFiles\PythonTools\visualstudio_py_launcher_nodebug.py", line 74, in run _vspu.exec_file(file, globals_obj) File "C:\Users\korevma\.vscode\extensions\ms-python.python-2018.2.1\pythonFiles\PythonTools\visualstudio_py_util.py", line 119, in exec_file exec_code(code, file, global_variables) File "C:\Users\korevma\.vscode\extensions\ms-python.python-2018.2.1\pythonFiles\PythonTools\visualstudio_py_util.py", line 95, in exec_code exec(code_obj, global_variables) ``` After removing the third argument, the wrapping works fine. Is it because the same unit is used twice? Does it have something to do with the usage of `unit_args_ndx` in the `_parse_wrap_args` function? I am using python 3
hgrecco/pint
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py index 158fbe1..0cefce9 100644 --- a/pint/testsuite/test_issues.py +++ b/pint/testsuite/test_issues.py @@ -571,3 +571,57 @@ class TestIssuesNP(QuantityTestCase): self.assertEqual(f(ureg.Quantity(1, '')), 2) self.assertRaises(DimensionalityError, f, ureg.Quantity(1, 'm')) + + def test_issue625a(self): + try: + from inspect import signature + except ImportError: + # Python2 does not have the inspect library. Import the backport. + from funcsigs import signature + + ureg = UnitRegistry() + Q_ = ureg.Quantity + from math import sqrt + + @ureg.wraps(ureg.second, (ureg.meters, ureg.meters/ureg.second**2)) + def calculate_time_to_fall(height, gravity=Q_(9.8, 'm/s^2')): + """Calculate time to fall from a height h with a default gravity. + + By default, the gravity is assumed to be earth gravity, + but it can be modified. + + d = .5 * g * t**2 + t = sqrt(2 * d / g) + """ + return sqrt(2 * height / gravity) + + lunar_module_height = Q_(10, 'm') + t1 = calculate_time_to_fall(lunar_module_height) + print(t1) + self.assertAlmostEqual(t1, Q_(1.4285714285714286, 's')) + + moon_gravity = Q_(1.625, 'm/s^2') + t2 = calculate_time_to_fall(lunar_module_height, moon_gravity) + self.assertAlmostEqual(t2, Q_(3.508232077228117, 's')) + + def test_issue625b(self): + try: + from inspect import signature + except ImportError: + # Python2 does not have the inspect library. Import the backport. + from funcsigs import signature + + ureg = UnitRegistry() + Q_ = ureg.Quantity + + @ureg.wraps('=A*B', ('=A', '=B')) + def get_displacement(time, rate=Q_(1, 'm/s')): + """Calculates displacement from a duration and default rate. + """ + return time * rate + + d1 = get_displacement(Q_(2, 's')) + self.assertAlmostEqual(d1, Q_(2, 'm')) + + d2 = get_displacement(Q_(2, 's'), Q_(1, 'deg/s')) + self.assertAlmostEqual(d2, Q_(2,' deg'))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "numpy>=1.16.0", "matplotlib>=2.0.0", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
contourpy==1.3.0 cycler==0.12.1 exceptiongroup==1.2.2 fonttools==4.56.0 importlib_resources==6.5.2 iniconfig==2.1.0 kiwisolver==1.4.7 matplotlib==3.9.4 numpy==2.0.2 packaging==24.2 pillow==11.1.0 -e git+https://github.com/hgrecco/pint.git@0c4a9503b9fe00a62a90c33cb49cb047784766d3#egg=Pint pluggy==1.5.0 pyparsing==3.2.3 pytest==8.3.5 python-dateutil==2.9.0.post0 six==1.17.0 tomli==2.2.1 zipp==3.21.0
name: pint channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - contourpy==1.3.0 - cycler==0.12.1 - exceptiongroup==1.2.2 - fonttools==4.56.0 - importlib-resources==6.5.2 - iniconfig==2.1.0 - kiwisolver==1.4.7 - matplotlib==3.9.4 - numpy==2.0.2 - packaging==24.2 - pillow==11.1.0 - pluggy==1.5.0 - pyparsing==3.2.3 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.2.1 - zipp==3.21.0 prefix: /opt/conda/envs/pint
[ "pint/testsuite/test_issues.py::TestIssuesNP::test_issue625a", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue625b" ]
[]
[ "pint/testsuite/test_issues.py::TestIssues::test_alternative_angstrom_definition", "pint/testsuite/test_issues.py::TestIssues::test_angstrom_creation", "pint/testsuite/test_issues.py::TestIssues::test_issue104", "pint/testsuite/test_issues.py::TestIssues::test_issue105", "pint/testsuite/test_issues.py::TestIssues::test_issue121", "pint/testsuite/test_issues.py::TestIssues::test_issue170", "pint/testsuite/test_issues.py::TestIssues::test_issue29", "pint/testsuite/test_issues.py::TestIssues::test_issue52", "pint/testsuite/test_issues.py::TestIssues::test_issue523", "pint/testsuite/test_issues.py::TestIssues::test_issue54", "pint/testsuite/test_issues.py::TestIssues::test_issue54_related", "pint/testsuite/test_issues.py::TestIssues::test_issue61", "pint/testsuite/test_issues.py::TestIssues::test_issue66", "pint/testsuite/test_issues.py::TestIssues::test_issue66b", "pint/testsuite/test_issues.py::TestIssues::test_issue69", "pint/testsuite/test_issues.py::TestIssues::test_issue85", "pint/testsuite/test_issues.py::TestIssues::test_issue86", "pint/testsuite/test_issues.py::TestIssues::test_issue93", "pint/testsuite/test_issues.py::TestIssues::test_issues86b", "pint/testsuite/test_issues.py::TestIssues::test_micro_creation", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue121", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue127", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue171_T", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue171_real_imag", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue250", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue252", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue323", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue339", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue354_356_370", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue44", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue45", 
"pint/testsuite/test_issues.py::TestIssuesNP::test_issue45b", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue468", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue482", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue483", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue50", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue532", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue62", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue74", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue75", "pint/testsuite/test_issues.py::TestIssuesNP::test_issue93" ]
[]
BSD
2,771
[ "docs/wrapping.rst", "pint/registry_helpers.py", "setup.py" ]
[ "docs/wrapping.rst", "pint/registry_helpers.py", "setup.py" ]
netromdk__vermin-8
653670b33ca9a54c95bd1ae06c56ce7474197a82
2018-07-13 09:54:23
8117060742e82c173b0b21b15c177464078775dd
coveralls: [![Coverage Status](https://coveralls.io/builds/17970201/badge)](https://coveralls.io/builds/17970201) Coverage decreased (-0.4%) to 94.631% when pulling **1598d1fcf288080ba827c4736cf995330f9f3256 on Darkheir:pathlib_import** into **653670b33ca9a54c95bd1ae06c56ce7474197a82 on netromdk:master**. Darkheir: Hum the check is failing on python 3.3 because its support has been dropped in wheel ``` wheel requires Python '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*' but the running Python is 3.3.6 ``` netromdk: Thanks for adding this, @Darkheir! If you wish to contribute further, I see that there were things added/changed to `pathlib` in versions 3.5, 3.6, and 3.7 wrt. functions and parameters.
diff --git a/.travis.yml b/.travis.yml index ef67b64..2726025 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,8 +16,8 @@ python: install: - pip install -U pip virtualenv script: -- if [[ $TRAVIS_PYTHON_VERSION != 3.2 ]]; then make setup-venv setup-coverage test-coverage; else make test; fi -- if [[ $TRAVIS_PYTHON_VERSION > 3.2 ]]; then make setup-misc; source .venv/bin/activate; make check; fi -- if [[ $TRAVIS_PYTHON_VERSION > 3.2 && $TRAVIS_PYTHON_VERSION < 3.7 ]]; then make setup-bandit; source .venv/bin/activate; make security-check; fi +- if [[ $TRAVIS_PYTHON_VERSION != 3.2 && $TRAVIS_PYTHON_VERSION != 3.3 ]]; then make setup-venv setup-coverage test-coverage; else make test; fi +- if [[ $TRAVIS_PYTHON_VERSION > 3.3 ]]; then make setup-misc; source .venv/bin/activate; make check; fi +- if [[ $TRAVIS_PYTHON_VERSION > 3.3 && $TRAVIS_PYTHON_VERSION < 3.7 ]]; then make setup-bandit; source .venv/bin/activate; make security-check; fi after_success: -- if [[ $TRAVIS_PYTHON_VERSION != 3.2 ]]; then make coveralls; fi +- if [[ $TRAVIS_PYTHON_VERSION != 3.2 && $TRAVIS_PYTHON_VERSION != 3.3 ]]; then make coveralls; fi diff --git a/vermin/rules.py b/vermin/rules.py index 947d5b9..d54363e 100644 --- a/vermin/rules.py +++ b/vermin/rules.py @@ -75,6 +75,7 @@ MOD_REQS = { "numbers": (2.6, 3.0), "optparse": (2.3, 3.0), "ossaudiodev": (2.3, 3.0), + "pathlib": (None, 3.4), "pickletools": (2.3, 3.0), "pkgutil": (2.3, 3.0), "platform": (2.3, 3.0),
pathlib module missing in checks **Describe the bug** The pathlib module is available since python 3.4 and is not in the checks **To Reproduce** Code using the pathlib module not having a minimum version of 3.4 **Expected behavior** The minimum version should then be python 3.4 and no python 2 support **Environment (please complete the following information):** Vermin 0.4.4
netromdk/vermin
diff --git a/tests/module.py b/tests/module.py index b8316fa..9900459 100644 --- a/tests/module.py +++ b/tests/module.py @@ -349,3 +349,6 @@ class VerminModuleTests(VerminTest): def test_venv(self): self.assertOnlyIn(3.3, detect("import venv")) + + def test_pathlib(self): + self.assertOnlyIn(3.4, detect("import pathlib"))
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work -e git+https://github.com/netromdk/vermin.git@653670b33ca9a54c95bd1ae06c56ce7474197a82#egg=vermin zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: vermin channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/vermin
[ "tests/module.py::VerminModuleTests::test_pathlib" ]
[]
[ "tests/module.py::VerminModuleTests::test_ConfigParser", "tests/module.py::VerminModuleTests::test_DocXMLRPCServer", "tests/module.py::VerminModuleTests::test_HTMLParser", "tests/module.py::VerminModuleTests::test_Queue", "tests/module.py::VerminModuleTests::test_SimpleXMLRPCServer", "tests/module.py::VerminModuleTests::test_SocketServer", "tests/module.py::VerminModuleTests::test_Tkinter", "tests/module.py::VerminModuleTests::test___builtin__", "tests/module.py::VerminModuleTests::test___future__", "tests/module.py::VerminModuleTests::test__dummy_thread", "tests/module.py::VerminModuleTests::test__markupbase", "tests/module.py::VerminModuleTests::test__winreg", "tests/module.py::VerminModuleTests::test_abc", "tests/module.py::VerminModuleTests::test_argparse", "tests/module.py::VerminModuleTests::test_ast", "tests/module.py::VerminModuleTests::test_asyncio", "tests/module.py::VerminModuleTests::test_atexit", "tests/module.py::VerminModuleTests::test_builtins", "tests/module.py::VerminModuleTests::test_bz2", "tests/module.py::VerminModuleTests::test_cProfile", "tests/module.py::VerminModuleTests::test_cgitb", "tests/module.py::VerminModuleTests::test_collections", "tests/module.py::VerminModuleTests::test_configparser", "tests/module.py::VerminModuleTests::test_contextlib", "tests/module.py::VerminModuleTests::test_cookielib", "tests/module.py::VerminModuleTests::test_copy_reg", "tests/module.py::VerminModuleTests::test_copyreg", "tests/module.py::VerminModuleTests::test_csv", "tests/module.py::VerminModuleTests::test_ctypes", "tests/module.py::VerminModuleTests::test_datetime", "tests/module.py::VerminModuleTests::test_dbm_io", "tests/module.py::VerminModuleTests::test_dbm_ndbm", "tests/module.py::VerminModuleTests::test_dbm_os", "tests/module.py::VerminModuleTests::test_dbm_struct", "tests/module.py::VerminModuleTests::test_dbm_sys", "tests/module.py::VerminModuleTests::test_dbm_whichdb", "tests/module.py::VerminModuleTests::test_decimal", 
"tests/module.py::VerminModuleTests::test_difflib", "tests/module.py::VerminModuleTests::test_dummy_thread", "tests/module.py::VerminModuleTests::test_dummy_threading", "tests/module.py::VerminModuleTests::test_email", "tests/module.py::VerminModuleTests::test_email_charset", "tests/module.py::VerminModuleTests::test_email_contentmanager", "tests/module.py::VerminModuleTests::test_email_header", "tests/module.py::VerminModuleTests::test_email_headerregistry", "tests/module.py::VerminModuleTests::test_email_policy", "tests/module.py::VerminModuleTests::test_faulthandler", "tests/module.py::VerminModuleTests::test_fractions", "tests/module.py::VerminModuleTests::test_functools", "tests/module.py::VerminModuleTests::test_future_builtins", "tests/module.py::VerminModuleTests::test_hashlib", "tests/module.py::VerminModuleTests::test_heapq", "tests/module.py::VerminModuleTests::test_hmac", "tests/module.py::VerminModuleTests::test_hotshot", "tests/module.py::VerminModuleTests::test_html", "tests/module.py::VerminModuleTests::test_htmlentitydefs", "tests/module.py::VerminModuleTests::test_http", "tests/module.py::VerminModuleTests::test_http_cookiejar", "tests/module.py::VerminModuleTests::test_importlib", "tests/module.py::VerminModuleTests::test_inspect", "tests/module.py::VerminModuleTests::test_io", "tests/module.py::VerminModuleTests::test_ipaddress", "tests/module.py::VerminModuleTests::test_itertools", "tests/module.py::VerminModuleTests::test_json", "tests/module.py::VerminModuleTests::test_logging", "tests/module.py::VerminModuleTests::test_lzma", "tests/module.py::VerminModuleTests::test_markupbase", "tests/module.py::VerminModuleTests::test_md5", "tests/module.py::VerminModuleTests::test_modulefinder", "tests/module.py::VerminModuleTests::test_msilib", "tests/module.py::VerminModuleTests::test_multiprocessing", "tests/module.py::VerminModuleTests::test_new", "tests/module.py::VerminModuleTests::test_numbers", "tests/module.py::VerminModuleTests::test_optparse", 
"tests/module.py::VerminModuleTests::test_ossaudiodev", "tests/module.py::VerminModuleTests::test_pickletools", "tests/module.py::VerminModuleTests::test_pkgutil", "tests/module.py::VerminModuleTests::test_platform", "tests/module.py::VerminModuleTests::test_pydoc", "tests/module.py::VerminModuleTests::test_queue", "tests/module.py::VerminModuleTests::test_repr", "tests/module.py::VerminModuleTests::test_reprlib", "tests/module.py::VerminModuleTests::test_runpy", "tests/module.py::VerminModuleTests::test_secrets", "tests/module.py::VerminModuleTests::test_sets", "tests/module.py::VerminModuleTests::test_shlex", "tests/module.py::VerminModuleTests::test_socketserver", "tests/module.py::VerminModuleTests::test_spwd", "tests/module.py::VerminModuleTests::test_sqlite3", "tests/module.py::VerminModuleTests::test_ssl", "tests/module.py::VerminModuleTests::test_string_letters", "tests/module.py::VerminModuleTests::test_string_lowercase", "tests/module.py::VerminModuleTests::test_string_uppercase", "tests/module.py::VerminModuleTests::test_stringprep", "tests/module.py::VerminModuleTests::test_subprocess", "tests/module.py::VerminModuleTests::test_sysconfig", "tests/module.py::VerminModuleTests::test_tarfile", "tests/module.py::VerminModuleTests::test_textwrap", "tests/module.py::VerminModuleTests::test_timeit", "tests/module.py::VerminModuleTests::test_tkinter", "tests/module.py::VerminModuleTests::test_tracemalloc", "tests/module.py::VerminModuleTests::test_typing", "tests/module.py::VerminModuleTests::test_unittest", "tests/module.py::VerminModuleTests::test_unittest_mock", "tests/module.py::VerminModuleTests::test_urllib2", "tests/module.py::VerminModuleTests::test_uuid", "tests/module.py::VerminModuleTests::test_venv", "tests/module.py::VerminModuleTests::test_warnings", "tests/module.py::VerminModuleTests::test_weakref", "tests/module.py::VerminModuleTests::test_winreg", "tests/module.py::VerminModuleTests::test_wsgiref", 
"tests/module.py::VerminModuleTests::test_xmlrpc", "tests/module.py::VerminModuleTests::test_xmlrpc_client", "tests/module.py::VerminModuleTests::test_xmlrpc_server", "tests/module.py::VerminModuleTests::test_xmlrpclib", "tests/module.py::VerminModuleTests::test_zipimport" ]
[]
MIT License
2,773
[ ".travis.yml", "vermin/rules.py" ]
[ ".travis.yml", "vermin/rules.py" ]
HECBioSim__Longbow-106
7e8d4e78e2e0590083c8a5630c24d175ba49ce48
2018-07-13 19:59:43
c81fcaccfa7fb2dc147e40970ef806dc6d6b22a4
diff --git a/longbow/applications.py b/longbow/applications.py index 8f99c1e..b2ee695 100644 --- a/longbow/applications.py +++ b/longbow/applications.py @@ -82,7 +82,7 @@ def checkapp(jobs): LOG.info("Testing the executables defined for each job.") - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: # If we haven't checked this resource then it is likely not in the dict if jobs[job]["resource"] not in checked: @@ -143,7 +143,7 @@ def processjobs(jobs): LOG.info("Processing job/s and detecting files that require upload.") # Process each job. - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: filelist = [] appplugins = getattr(apps, "PLUGINEXECS") @@ -166,7 +166,7 @@ def processjobs(jobs): "not supported".format(job)) # If we have multiple jobs. - if len(jobs) > 1: + if len([a for a in jobs if "lbowconf" not in a]) > 1: # Add the job name to the path. jobs[job]["localworkdir"] = os.path.join( diff --git a/longbow/configuration.py b/longbow/configuration.py index c8af848..432f2d8 100644 --- a/longbow/configuration.py +++ b/longbow/configuration.py @@ -409,14 +409,13 @@ def saveini(inifile, params): ini = open(inifile, "w") - for section in params: + for obj in params: - ini.write("[" + str(section) + "]\n") + ini.write("[" + str(obj) + "]\n") - for option in params[section]: + for opt in params[obj]: - ini.write(str(option) + " = " + str(params[section][option]) + - "\n") + ini.write(str(opt) + " = " + str(params[obj][opt]) + "\n") ini.write("\n") @@ -429,7 +428,7 @@ def _processconfigsfinalinit(jobs): modules = getattr(apps, "PLUGINEXECS") modules[""] = "" - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: # This is just for logging messages. jobs[job]["jobname"] = job @@ -463,10 +462,13 @@ def _processconfigsfinalinit(jobs): LOG.debug("Job '%s' will be run in the '%s' directory on the remote " "resource.", job, jobs[job]["destdir"]) - # Create a recovery file. 
- jobs[job]["recoveryfile"] = ( - os.path.join(os.path.expanduser('~/.longbow'), "recovery-" + - time.strftime("%Y%m%d-%H%M%S"))) + # Create a recovery file. + if "lbowconf" not in jobs: + + jobs["lbowconf"] = {} + + jobs["lbowconf"]["recoveryfile"] = ( + "recovery-" + time.strftime("%Y%m%d-%H%M%S")) def _processconfigsparams(jobs, parameters, jobdata, hostdata): @@ -498,9 +500,10 @@ def _processconfigsparams(jobs, parameters, jobdata, hostdata): def _processconfigsresource(parameters, jobdata, hostsections): """Check which HPC each job should use.""" - # Initialise. + # Initialise jobs = {} + # Process resource/s for job/s. for job in jobdata: @@ -573,7 +576,7 @@ def _processconfigsvalidate(jobs): "specifying a value for it in the configuration file." } # Check parameters that are required for running jobs are provided. - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: # Validate required parameters have been set. for validationitem in required: diff --git a/longbow/entrypoints.py b/longbow/entrypoints.py index d67b351..bddf6cc 100644 --- a/longbow/entrypoints.py +++ b/longbow/entrypoints.py @@ -110,44 +110,30 @@ def launcher(): "recover": "", "resource": "", "replicates": "", + "update": "", "verbose": False } # Specify all recognised longbow arguments alllongbowargs = [ - "-about", "--about", - "-debug", "--debug", - "-disconnect", "--disconnect", - "-examples", "--examples", "-h", - "-help", "--help", - "-hosts", "--hosts", - "-job", "--job", - "-jobname", "--jobname", - "-log", "--log", - "-maxtime", "--maxtime", - "-nochecks", "--nochecks", - "-recover", "--recover", - "-resource", "--resource", - "-replicates", "--replicates", + "--update", "-V", - "-verbose", "--verbose", - "-version", "--version" ] @@ -209,7 +195,7 @@ def launcher(): # If no executable and jobfile has been given then fail. 
if (parameters["executable"] == "" and parameters["job"] == "" and - parameters["recover"] == ""): + parameters["recover"] == "" and parameters["update"] == ""): raise exceptions.RequiredinputError( "There was no executable or job file given on the " @@ -219,19 +205,108 @@ def launcher(): # --------------------------------------------------------------------- # Call one of the main methods at the top level of the library. - # Are we trying to recover or are we running as normal. - if parameters["recover"] == "": + jobs = {} + + # If recovery or update mode is not active then this is a new run. + if parameters["recover"] == "" and parameters["update"] == "": LOG.info("Initialisation complete.") - longbow(parameters) + longbow(jobs, parameters) + + # If recovery mode is set then start the recovery process. + elif parameters["recover"] != "" and parameters["update"] == "": + + LOG.info("Starting recovery mode to reconnect monitoring of jobs.") + + recovery(jobs, parameters["recover"]) + + # If update mode is set then start the update process. + elif parameters["recover"] == "" and parameters["update"] != "": + + LOG.info("Starting update mode to refresh progress of jobs.") + + update(jobs, parameters["update"]) + # If too many arguments are set, we have a problem else: - LOG.info("Entering recovery mode.") + raise exceptions.CommandlineargsError( + "You have both the --recover and --update command-line flags " + "set, these cannot be used together as they enable " + "conflicting functionality. Either reconnect with persistent " + "monitoring (--recover) or reconnect to refresh the status of " + "jobs and sync current files before disconnecting again " + "(--update).") + + # If the user interrupts Longbow then they are aborting the jobs, so kill + # off any running jobs and then remove the job directories. Otherwise just + # raise all other errors to the top level where in future we can attempt to + # recover. 
+ except KeyboardInterrupt: + + LOG.info("User interrupt detected.") + + if len([a for a in jobs if "lbowconf" not in a]) >= 1: + + LOG.info("Kill any queued or running jobs and clean up.") + + # If we are exiting at this stage then we need to kill off + for item in [a for a in jobs if "lbowconf" not in a]: + + job = jobs[item] - recovery(parameters["recover"]) + if "laststatus" in job: + # If job is not finished delete and stage. + if (job["laststatus"] != "Complete" and + job["laststatus"] != "Finished" and + job["laststatus"] != "Submit Error"): + + # Kill it. + scheduling.delete(job) + + # Transfer the directories as they are. + staging.stage_downstream(job) + + # Job is finished then just stage. + elif job["laststatus"] != "Submit Error": + + # Transfer the directories as they are. + staging.stage_downstream(job) + + staging.cleanup(jobs) + + # If disconnect mode is enabled then the disconnect exception is raised, + # allow to disconnect gracefully. + except exceptions.DisconnectException: + + LOG.info("User specified --disconnect flag on command-line, so " + "Longbow will exit.") + LOG.info("You can reconnect this session for persistent monitoring by " + "using the recovery file:") + LOG.info("longbow --recover {0} --verbose" + .format(jobs["lbowconf"]["recoveryfile"])) + LOG.info("Or an update of current progress followed by disconnecting " + "can be done using:") + LOG.info("longbow --update {0} --verbose" + .format(jobs["lbowconf"]["recoveryfile"])) + + # If disconnect mode is enabled then the disconnect exception is raised, + # allow to disconnect gracefully. 
+ except exceptions.UpdateExit: + + LOG.info("Update of current job progress has completed, exiting.") + LOG.info("You can reconnect this session for persistent monitoring by " + "using the recovery file:") + LOG.info("longbow --recover {0} --verbose" + .format(jobs["lbowconf"]["recoveryfile"])) + LOG.info("Or an update of current progress followed by disconnecting " + "can be done using:") + LOG.info("longbow --update {0} --verbose" + .format(jobs["lbowconf"]["recoveryfile"])) + + # If a problem happens assign the correct level of debug logging. except Exception as err: if parameters["debug"] is True: @@ -242,6 +317,7 @@ def launcher(): LOG.error(err) + # Show nice exit message. finally: LOG.info("Good bye from Longbow!") @@ -249,7 +325,7 @@ def launcher(): "powerful biomolecular simulation software tools.") -def longbow(parameters): +def longbow(jobs, parameters): """Entry point at the top level of the Longbow library. Being the top level method that makes calls on the Longbow library. @@ -266,7 +342,12 @@ def longbow(parameters): # escalating the exception to trigger graceful exit. # Load configurations and initialise Longbow data structures. - jobs = configuration.processconfigs(parameters) + jobparams = configuration.processconfigs(parameters) + + # Copy to jobs so when exceptions are raised the structure is available. + for param in jobparams: + + jobs[param] = jobparams[param] # Test all connection/s specified in the job configurations shellwrappers.checkconnections(jobs) @@ -289,70 +370,25 @@ def longbow(parameters): # uploading. scheduling.prepare(jobs) - # Exceptions that occur before here don't require cleanup operations before - # reporting up. - try: - - # Stage all of the job files along with the scheduling script. - staging.stage_upstream(jobs) + # Stage all of the job files along with the scheduling script. + staging.stage_upstream(jobs) - # Submit all jobs. - scheduling.submit(jobs) + # Submit all jobs. 
+ scheduling.submit(jobs) - # Process the disconnect function. - if parameters["disconnect"] is True: + # Process the disconnect function. + if parameters["disconnect"] is True: - raise exceptions.DisconnectException + raise exceptions.DisconnectException - # Monitor all jobs. - scheduling.monitor(jobs) + # Monitor all jobs. + scheduling.monitor(jobs) - # Clean up all jobs - staging.cleanup(jobs) + # Clean up all jobs + staging.cleanup(jobs) - # If the user interrupts Longbow at this stage then it they are aborting - # the jobs, so kill off any running jobs and then remove the job - # directories. Otherwise just raise all other errors to the top level where - # in future we can attempt to recover. - except KeyboardInterrupt: - LOG.info("User interrupt detected, kill any queued or running jobs " - "and removed any files staged.") - - # If we are exiting at this stage then we need to kill off - for item in jobs: - - job = jobs[item] - - if "laststatus" in job: - - # If job is not finished delete and stage. - if (job["laststatus"] != "Complete" and - job["laststatus"] != "Submit Error"): - - # Kill it. - scheduling.delete(job) - - # Transfer the directories as they are. - staging.stage_downstream(job) - - # Job is finished then just stage. - elif job["laststatus"] != "Submit Error": - - # Transfer the directories as they are. - staging.stage_downstream(job) - - staging.cleanup(jobs) - - except exceptions.DisconnectException: - - LOG.info("User specified --disconnect flag on command-line, so " - "Longbow will exit. You can reconnect this session by using " - "the recovery file, details of this file will be listed in " - "the logs") - - -def recovery(recoveryfile): +def recovery(jobs, recoveryfile): """Recover a Longbow session. This method is for attempting to recover a failed Longbow session or to @@ -367,67 +403,76 @@ def recovery(recoveryfile): recoveryfile (string): A path to the recovery file. 
""" - LOG.info("Attempting to find the recovery files") - longbowdir = os.path.expanduser('~/.longbow') - jobfile = os.path.join(longbowdir, recoveryfile) + jobfile = os.path.join(os.path.expanduser('~/.longbow'), recoveryfile) + + LOG.info("Attempting to find the recovery file '{0}'".format(jobfile)) # Load the jobs recovery file. if os.path.isfile(jobfile): - LOG.info("Recovery file found at '%s'", jobfile) + LOG.info("Recovery file found.") + + _, _, jobparams = configuration.loadconfigs(jobfile) + + # Copy to jobs so when exceptions are raised the structure is available. + for param in jobparams: - _, _, jobs = configuration.loadconfigs(jobfile) + jobs[param] = jobparams[param] else: raise exceptions.RequiredinputError( - "Recovery file could not be found at '{0}' make sure you are " - "running the recovery from the job directory that was initially " - "used to launch the failed job".format(jobfile)) + "Recovery file could not be found, make sure you haven't deleted " + "the recovery file and that you are not providing the full path, " + "just the file name is needed.") - try: + # Rejoin at the monitoring stage. This will assume that all jobs that + # are no longer in the queue have completed. + scheduling.monitor(jobs) - # Rejoin at the monitoring stage. This will assume that all jobs that - # are no longer in the queue have completed. - scheduling.monitor(jobs) + # Cleanup the remote working directory. + staging.cleanup(jobs) - # Cleanup the remote working directory. - staging.cleanup(jobs) - # If the user interrupts Longbow at this stage then it they are aborting - # the jobs, so kill off any running jobs and then remove the job - # directories. Otherwise just raise all other errors to the top level where - # in future we can attempt to recover. - except KeyboardInterrupt: +def update(jobs, updatefile): + """Trigger update of a disconnected Longbow session. 
- LOG.info("User interrupt detected, kill any queued or running jobs " - "and removed any files staged.") + This method will start the update process on an existing but disconnected + Longbow session. All job statuses will be checked and updated in the + recovery file and all output files will be synced before disconnecting.""" - # If we are exiting at this stage then we need to kill off - for item in jobs: + jobfile = os.path.join(os.path.expanduser('~/.longbow'), updatefile) - job = jobs[item] + LOG.info("Attempting to find the recovery file '{0}'".format(jobfile)) - if "laststatus" in job: + # Load the jobs recovery file. + if os.path.isfile(jobfile): + + LOG.info("Recovery file found.") + + _, _, jobparams = configuration.loadconfigs(jobfile) - # If job is not finished delete and stage. - if (job["laststatus"] != "Complete" and - job["laststatus"] != "Submit Error"): + # Copy to jobs so when exceptions are raised the structure is available. + for param in jobparams: - # Kill it. - scheduling.delete(job) + jobs[param] = jobparams[param] - # Transfer the directories as they are. - staging.stage_downstream(job) + else: + + raise exceptions.RequiredinputError( + "Recovery file could not be found, make sure you haven't deleted " + "the recovery file and that you are not providing the full path, " + "just the file name is needed.") - # Job is finished then just stage. - elif job["laststatus"] != "Submit Error": + # Add the updater key + jobs["lbowconf"]["update"] = True - # Transfer the directories as they are. - staging.stage_downstream(job) + # Enter monitoring loop + scheduling.monitor(jobs) - staging.cleanup(jobs) + # Cleanup the remote working directory. 
+ staging.cleanup(jobs) def _commandlineproc(alllongbowargs, cmdlnargs, parameters): @@ -668,7 +713,7 @@ def _messageflags(longbowargs): "--debug : additional output to assist " "debugging.\n" "--disconnect : instructs Longbow to disconnect and" - " exit\n after submitting jobs.\n" + " exit\n after submitting jobs.\n" "--examples : downloads example files to " "./LongbowExamples\n" "--help, -h : prints Longbow help.\n" @@ -680,14 +725,14 @@ def _messageflags(longbowargs): "submitted.\n" "--log [file name] : specifies the file Longbow output " "should be directed to.\n" - "--maxtime [HH:MM] : set the maximum job time for all " - "jobs.\n" - "--recover [file name] : launches the recovery mode.\n" + "--recover [file name] : Launches the recovery mode.\n" "--resource [name] : specifies the remote resource.\n" "--replicates [number] : number of replicate jobs to be " "submitted.\n" "--verbose : additional run-time info to be " "output.\n" + "--update [file name] : launches the update mode to sync " + "current job progress and files.\n" "--version, -V : prints Longbow version number.\n" "\n" "Read the documentation at http://www.hecbiosim.ac.uk/ for more " diff --git a/longbow/exceptions.py b/longbow/exceptions.py index 068e179..c0c371b 100644 --- a/longbow/exceptions.py +++ b/longbow/exceptions.py @@ -72,6 +72,13 @@ class DisconnectException(Exception): pass + +class UpdateExit(Exception): + + """Exception, to exit gracefully after update of job progress.""" + + pass + # ----------------------------------------------------------------------------- # Exceptions for applications.py diff --git a/longbow/scheduling.py b/longbow/scheduling.py index 43bbd81..72b5cc7 100644 --- a/longbow/scheduling.py +++ b/longbow/scheduling.py @@ -73,7 +73,6 @@ import longbow.schedulers as schedulers LOG = logging.getLogger("longbow.corelibs.scheduling") -QUEUEINFO = {} def checkenv(jobs, hostconf): @@ -101,7 +100,7 @@ def checkenv(jobs, hostconf): saveparams = {} # Take a look at each job. 
- for item in jobs: + for item in [a for a in jobs if "lbowconf" not in a]: job = jobs[item] @@ -221,13 +220,17 @@ def monitor(jobs): allfinished = False lastpolltime = 0 laststagetime = 0 - recoveryfile = jobs[list(jobs.keys())[0]]["recoveryfile"] + basepath = os.path.expanduser('~/.longbow') + recoveryfile = os.path.join(basepath, jobs["lbowconf"]["recoveryfile"]) saverecoveryfile = True recoveryfileerror = False # Loop until all jobs are done. while allcomplete is False: + # Sane time interval (CPU core maxes out easily otherwise). + time.sleep(1.0) + now = time.time() # Check if we should be polling. @@ -244,15 +247,6 @@ def monitor(jobs): laststagetime = int(now) saverecoveryfile = _stagejobfiles(jobs, saverecoveryfile) - # Update the queue info settings to each job just in case something - # happens requiring user to use recovery. - for job in jobs: - - jobs[job]["queue-slots"] = \ - QUEUEINFO[jobs[job]["resource"]]["queue-slots"] - jobs[job]["queue-max"] = \ - QUEUEINFO[jobs[job]["resource"]]["queue-max"] - # Save out the recovery files. if (os.path.isdir(os.path.expanduser('~/.longbow')) and saverecoveryfile is True and recoveryfileerror is False and @@ -273,13 +267,18 @@ def monitor(jobs): allcomplete, allfinished = _checkcomplete(jobs) - # Sane time interval (CPU core maxes out easily otherwise). 
- time.sleep(1.0) + if ("update" in jobs["lbowconf"] and allfinished is False and + allcomplete is False): + + if jobs["lbowconf"]["update"] is True: + + jobs["lbowconf"]["update"] = False + raise exceptions.UpdateExit complete = 0 error = 0 - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: if jobs[job]["laststatus"] == "Submit Error": @@ -310,7 +309,7 @@ def prepare(jobs): """ LOG.info("Creating submit files for job/s.") - for item in jobs: + for item in [a for a in jobs if "lbowconf" not in a]: job = jobs[item] scheduler = job["scheduler"] @@ -330,8 +329,6 @@ def prepare(jobs): LOG.info("For job '%s' user has supplied their own job submit " "script - skipping creation.", item) - job["upload-include"] = job["upload-include"] + ", " + job["subfile"] - except AttributeError: raise exceptions.PluginattributeError( @@ -361,18 +358,15 @@ def submit(jobs): LOG.info("Submitting job/s.") - for item in jobs: + for item in [a for a in jobs if "lbowconf" not in a]: job = jobs[item] - # Have we got this resource already? - if job["resource"] not in QUEUEINFO: - - # no, well create it. - QUEUEINFO[job["resource"]] = {"queue-slots": str(0), - "queue-max": str(0)} + # Set up counters for each resource. + jobs["lbowconf"][job["resource"] + "-" + "queue-slots"] = str(0) + jobs["lbowconf"][job["resource"] + "-" + "queue-max"] = str(0) - for item in jobs: + for item in [a for a in jobs if "lbowconf" not in a]: job = jobs[item] scheduler = job["scheduler"] @@ -387,8 +381,8 @@ def submit(jobs): job["laststatus"] = "Queued" # Increment the queue counter by one (used to count the slots). - QUEUEINFO[job["resource"]]["queue-slots"] = \ - str(int(QUEUEINFO[job["resource"]]["queue-slots"]) + 1) + jobs["lbowconf"][job["resource"] + "-" + "queue-slots"] = str(int( + jobs["lbowconf"][job["resource"] + "-" + "queue-slots"]) + 1) submitted += 1 @@ -411,7 +405,7 @@ def submit(jobs): # Hit maximum slots on resource, Longbow will sub-schedule these. 
except exceptions.QueuemaxError: - for item in jobs: + for item in [a for a in jobs if "lbowconf" not in a]: if "laststatus" not in jobs[item]: @@ -428,31 +422,25 @@ def submit(jobs): break # We want to find out what the maximum number of slots we have are. - if int(QUEUEINFO[job["resource"]]["queue-slots"]) > \ - int(QUEUEINFO[job["resource"]]["queue-max"]): - - QUEUEINFO[job["resource"]]["queue-max"] = \ - QUEUEINFO[job["resource"]]["queue-slots"] + if int(jobs["lbowconf"][job["resource"] + "-" + "queue-slots"]) > \ + int(jobs["lbowconf"][job["resource"] + "-" + "queue-max"]): - # Store a copy of the queueinfo data in the jobs data structure in case - # recovery is needed. - for item in jobs: - - job = jobs[item] - - job["queue-slots"] = QUEUEINFO[job["resource"]]["queue-slots"] - job["queue-max"] = QUEUEINFO[job["resource"]]["queue-max"] + jobs["lbowconf"][job["resource"] + "-" + "queue-max"] = \ + jobs["lbowconf"][job["resource"] + "-" + "queue-slots"] # Save out the recovery files. if (os.path.isdir(os.path.expanduser('~/.longbow')) and - job["recoveryfile"] != ""): + jobs["lbowconf"]["recoveryfile"] != ""): + + basepath = os.path.expanduser('~/.longbow') + recoveryfile = os.path.join(basepath, jobs["lbowconf"]["recoveryfile"]) try: LOG.info("Recovery file will be placed at path '%s'", - job["recoveryfile"]) + recoveryfile) - configuration.saveini(job["recoveryfile"], jobs) + configuration.saveini(recoveryfile, jobs) except (OSError, IOError): @@ -543,15 +531,7 @@ def _monitorinitialise(jobs): stageinterval = 0 # Sort out some defaults. - for job in jobs: - - # If we came from recovery mode then rebuild the queueinfo structure. - if jobs[job]["resource"] not in QUEUEINFO: - - QUEUEINFO[jobs[job]["resource"]] = { - "queue-slots": jobs[job]["queue-slots"], - "queue-max": jobs[job]["queue-max"] - } + for job in [a for a in jobs if "lbowconf" not in a]: # This should always be present. 
if "laststatus" not in jobs[job]: @@ -584,7 +564,7 @@ def _polljobs(jobs, save): finihed. """ - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: if (jobs[job]["laststatus"] != "Finished" and jobs[job]["laststatus"] != "Complete" and @@ -614,9 +594,9 @@ def _polljobs(jobs, save): if status == "Finished": - resource = jobs[job]["resource"] - QUEUEINFO[resource]["queue-slots"] = \ - str(int(QUEUEINFO[resource]["queue-slots"]) - 1) + qslots = jobs[job]["resource"] + "-" + "queue-slots" + jobs["lbowconf"][qslots] = str(int( + jobs["lbowconf"][qslots]) - 1) LOG.info("Status of job '%s' with id '%s' is '%s'", job, jobs[job]["jobid"], status) @@ -632,7 +612,7 @@ def _stagejobfiles(jobs, save): complete. This will stop future staging. """ - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: if (jobs[job]["laststatus"] == "Running" or jobs[job]["laststatus"] == "Subjob(s) running" or @@ -651,12 +631,13 @@ def _stagejobfiles(jobs, save): def _checkwaitingjobs(jobs, save): """Check if any jobs marked as "Waiting Submission" can be submitted.""" - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: # Check if we can submit any further jobs. + resource = jobs[job]["resource"] if (jobs[job]["laststatus"] == "Waiting Submission" and - int(QUEUEINFO[jobs[job]["resource"]]["queue-slots"]) < - int(QUEUEINFO[jobs[job]["resource"]]["queue-max"])): + int(jobs["lbowconf"][resource + "-" + "queue-slots"]) < + int(jobs["lbowconf"][resource + "-" + "queue-max"])): # Try and submit this job. try: @@ -670,8 +651,8 @@ def _checkwaitingjobs(jobs, save): jobs[job]["jobid"]) # Increment the queue counter by one (used to count the slots). 
- QUEUEINFO[jobs[job]["resource"]]["queue-slots"] = str( - int(QUEUEINFO[jobs[job]["resource"]]["queue-slots"]) + 1) + jobs["lbowconf"][resource + "-" + "queue-slots"] = str(int( + jobs["lbowconf"][resource + "-" + "queue-slots"]) + 1) save = True @@ -711,7 +692,7 @@ def _checkcomplete(jobs): error = [] finished = [] - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: if jobs[job]["laststatus"] != "Submit Error": @@ -730,7 +711,7 @@ def _checkcomplete(jobs): allcomplete = True - if len(error) == len(jobs): + if len(error) == len([a for a in jobs if "lbowconf" not in a]): allcomplete = True diff --git a/longbow/shellwrappers.py b/longbow/shellwrappers.py index 95ce3bf..9dfaf09 100644 --- a/longbow/shellwrappers.py +++ b/longbow/shellwrappers.py @@ -129,7 +129,7 @@ def checkconnections(jobs): # Test all of the computers listed in jobs in the job configuration # file, there is no need to check all the ones listed in host # configuration each time if they are not used. - for item in jobs: + for item in [a for a in jobs if "lbowconf" not in a]: # Have we checked this connection already? if jobs[item]["resource"] not in checked: @@ -160,7 +160,7 @@ def checkconnections(jobs): # Go over all jobs referencing this machine and switch on # the environment fix. 
- for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: if jobs[job]["resource"] == jobs[item]["resource"]: diff --git a/longbow/staging.py b/longbow/staging.py index 842cae5..49043f2 100644 --- a/longbow/staging.py +++ b/longbow/staging.py @@ -84,7 +84,7 @@ def stage_upstream(jobs): """ LOG.info("Staging files for job/s.") - for item in jobs: + for item in [a for a in jobs if "lbowconf" not in a]: job = jobs[item] destdir = job["destdir"] @@ -173,7 +173,7 @@ def cleanup(jobs): """ LOG.info("Cleaning up the work directories.") - for item in jobs: + for item in [a for a in jobs if "lbowconf" not in a]: job = jobs[item] destdir = job["destdir"] @@ -221,11 +221,13 @@ def cleanup(jobs): pass - if (jobs[list(jobs.keys())[0]]["recoveryfile"] != "" and - os.path.isfile(jobs[list(jobs.keys())[0]]["recoveryfile"])): + recfile = jobs["lbowconf"]["recoveryfile"] + fpath = os.path.expanduser('~/.longbow') + + if (recfile != "" and os.path.isfile(os.path.join(fpath, recfile))): LOG.info("Removing the recovery file.") - os.remove(jobs[list(jobs.keys())[0]]["recoveryfile"]) + os.remove(os.path.join(fpath, recfile)) LOG.info("Cleaning up complete.")
Implement update mode for disconnected sessions

When using the disconnect mode with --disconnect, currently the only way to reconnect is via the --recover option, but this reconnects permanently and does not disconnect again afterwards. A new mode should be implemented that reconnects, updates the status of the jobs, transfers files, and then disconnects again. It should be possible to do this as many times as the user wishes, up until all jobs have completed.
HECBioSim/Longbow
diff --git a/tests/unit/configuration/test_saveini.py b/tests/unit/configuration/test_saveini.py index 2cd4a83..d0cd843 100644 --- a/tests/unit/configuration/test_saveini.py +++ b/tests/unit/configuration/test_saveini.py @@ -56,3 +56,50 @@ def test_saveini_test1(): assert open("/tmp/initest", "rb").read() == open( os.path.join(os.getcwd(), "tests/standards/saveini.txt"), "rb").read() + + +def test_saveini_test2(): + + """ + A more advanced test checking that the internal configuration data is + saved. + """ + + params = { + "lbowconf": { + "update": False, + "hpc1-queue-max": 0, + "hpc1-queue-slots": 0 + }, + "job1": { + "param1": "val1", + "param2": "val2", + }, + "job2": { + "parama": "vala", + "paramb": "valb", + }, + "job3": { + "parami": "vali", + "paramii": "valii", + } + } + + saveini("/tmp/initest2", params) + + with open("/tmp/initest2", "rb") as tmpfile: + tmpcontents = tmpfile.read().decode("utf-8") + + assert "[lbowconf]\n" in tmpcontents + assert "hpc1-queue-max = 0\n" in tmpcontents + assert "update = False\n" in tmpcontents + assert "hpc1-queue-slots = 0\n" in tmpcontents + assert "[job3]\n" in tmpcontents + assert "[job2]\n" in tmpcontents + assert "[job1]\n" in tmpcontents + assert "param1 = val1\n" in tmpcontents + assert "param2 = val2\n" in tmpcontents + assert "parama = vala\n" in tmpcontents + assert "paramb = valb\n" in tmpcontents + assert "parami = vali\n" in tmpcontents + assert "paramii = valii\n" in tmpcontents diff --git a/tests/unit/entrypoints/test_launcher.py b/tests/unit/entrypoints/test_launcher.py index 1054867..9da91c4 100644 --- a/tests/unit/entrypoints/test_launcher.py +++ b/tests/unit/entrypoints/test_launcher.py @@ -47,6 +47,43 @@ except ImportError: import longbow.exceptions as exceptions from longbow.entrypoints import launcher +from longbow.exceptions import UpdateExit + + +def _configload(_): + + "Mock configuration" + + jobs={"lbowconf": {"recoveryfile": "recovery.file"}} + + return jobs + + +def _runningjobs(_): + + 
"Set up two running jobs" + + jobs = {"job1": {"laststatus": "Running"}, "job2": {"laststatus": "Running"}} + + return jobs + + +def _finishedjobs(_): + + "Set up two running jobs" + + jobs = {"job1": {"laststatus": "Finished"}, "job2": {"laststatus": "Finished"}} + + return jobs + + +def _completejobs(_): + + "Set up two running jobs" + + jobs = {"job1": {"laststatus": "Complete"}, "job2": {"laststatus": "Complete"}} + + return jobs @mock.patch('longbow.entrypoints.longbow') @@ -67,7 +104,7 @@ def test_main_test1(m_isfile, m_longbowmain): launcher() - params = m_longbowmain.call_args[0][0] + params = m_longbowmain.call_args[0][1] assert m_longbowmain.call_count == 1 assert params["debug"] is False @@ -104,7 +141,7 @@ def test_main_test2(m_isfile, m_longbowmain): launcher() - params = m_longbowmain.call_args[0][0] + params = m_longbowmain.call_args[0][1] assert m_longbowmain.call_count == 1 assert params["debug"] is False @@ -139,15 +176,66 @@ def test_main_test3(m_isfile, m_recovery): launcher() - params = m_recovery.call_args[0][0] + params = m_recovery.call_args[0][1] assert m_recovery.call_count == 1 assert params == "recovery.file" @mock.patch('longbow.entrypoints.longbow') [email protected]('longbow.entrypoints.recovery') [email protected]('longbow.entrypoints.update') @mock.patch('os.path.isfile') -def test_main_test4(m_isfile, m_longbowmain): +def test_main_test4(m_isfile, m_update, m_recovery, m_longbow): + + """ + Check that the update method gets called, this is a rudimentary test. 
+ """ + + m_isfile.return_value = True + + args = ["longbow", "--update", "update.file", "--log", "new-log.file", + "--verbose"] + + with mock.patch('sys.argv', args): + + launcher() + + params = m_update.call_args[0][1] + + assert m_longbow.call_count == 0 + assert m_recovery.call_count == 0 + assert m_update.call_count == 1 + assert params == "update.file" + + [email protected]('longbow.entrypoints.longbow') [email protected]('longbow.entrypoints.recovery') [email protected]('longbow.entrypoints.update') [email protected]('os.path.isfile') +def test_main_test5(m_isfile, m_update, m_recovery, m_longbow): + + """ + Check that longbow doesn't launch if too many args are given. + """ + + m_isfile.return_value = True + + args = ["longbow", "--recover", "recovery.file", "--update", "update.file", + "--log", "new-log.file", "--verbose"] + + with mock.patch('sys.argv', args): + + launcher() + + assert m_longbow.call_count == 0 + assert m_recovery.call_count == 0 + assert m_update.call_count == 0 + + [email protected]('longbow.entrypoints.longbow') [email protected]('os.path.isfile') +def test_main_test6(m_isfile, m_longbowmain): """ Test that exception handling happens properly. @@ -165,7 +253,7 @@ def test_main_test4(m_isfile, m_longbowmain): launcher() - params = m_longbowmain.call_args[0][0] + params = m_longbowmain.call_args[0][1] assert m_longbowmain.call_count == 1 assert params["debug"] is False @@ -185,7 +273,7 @@ def test_main_test4(m_isfile, m_longbowmain): @mock.patch('longbow.entrypoints.longbow') @mock.patch('os.path.isfile') -def test_main_test5(m_isfile, m_longbowmain): +def test_main_test7(m_isfile, m_longbowmain): """ Test that exception handling happens properly. 
@@ -203,7 +291,7 @@ def test_main_test5(m_isfile, m_longbowmain): launcher() - params = m_longbowmain.call_args[0][0] + params = m_longbowmain.call_args[0][1] assert m_longbowmain.call_count == 1 assert params["debug"] is True @@ -224,7 +312,7 @@ def test_main_test5(m_isfile, m_longbowmain): @mock.patch('longbow.entrypoints.recovery') @mock.patch('longbow.entrypoints.longbow') @mock.patch('os.path.isfile') -def test_main_test6(m_isfile, m_longbowmain, m_recovery): +def test_main_test8(m_isfile, m_longbowmain, m_recovery): """ Test that exception handling happens properly. @@ -241,3 +329,226 @@ def test_main_test6(m_isfile, m_longbowmain, m_recovery): assert m_longbowmain.call_count == 0 assert m_recovery.call_count == 0 + + [email protected]('longbow.staging.cleanup') [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.scheduling.monitor') [email protected]('longbow.scheduling.submit') [email protected]('longbow.staging.stage_upstream') [email protected]('longbow.scheduling.prepare') [email protected]('longbow.applications.processjobs') [email protected]('longbow.applications.checkapp') [email protected]('longbow.scheduling.checkenv') [email protected]('longbow.shellwrappers.checkconnections') [email protected]('longbow.configuration.processconfigs') [email protected]('os.path.isfile') +def test_main_test9(m_isfile, m_procconf, m_testcon, m_testenv, m_testapp, + m_procjob, m_schedprep, m_stagup, m_sub, m_mon, m_del, + m_stagdown, m_clean): + + """Test the keyboard interrupt feature with running jobs.""" + + m_isfile.return_value = True + + args = ["longbow", "--job", "testjob", "--resource", "big-machine", + "--debug"] + + m_procconf.side_effect = _runningjobs + m_mon.side_effect = KeyboardInterrupt + + with mock.patch('sys.argv', args): + + launcher() + + assert m_procconf.call_count == 1 + assert m_testcon.call_count == 1 + assert m_testenv.call_count == 1 + assert m_testapp.call_count == 
1 + assert m_procjob.call_count == 1 + assert m_schedprep.call_count == 1 + assert m_stagup.call_count == 1 + assert m_sub.call_count == 1 + assert m_mon.call_count == 1 + assert m_del.call_count == 2 + assert m_stagdown.call_count == 2 + assert m_clean.call_count == 1 + + [email protected]('longbow.staging.cleanup') [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.scheduling.monitor') [email protected]('longbow.scheduling.submit') [email protected]('longbow.staging.stage_upstream') [email protected]('longbow.scheduling.prepare') [email protected]('longbow.applications.processjobs') [email protected]('longbow.applications.checkapp') [email protected]('longbow.scheduling.checkenv') [email protected]('longbow.shellwrappers.checkconnections') [email protected]('longbow.configuration.processconfigs') [email protected]('os.path.isfile') +def test_main_test10(m_isfile, m_procconf, m_testcon, m_testenv, m_testapp, + m_procjob, m_schedprep, m_stagup, m_sub, m_mon, m_del, + m_stagdown, m_clean): + + """Test the keyboard interrupt feature with running jobs.""" + + m_isfile.return_value = True + + args = ["longbow", "--job", "testjob", "--resource", "big-machine", + "--debug"] + + m_procconf.side_effect = _finishedjobs + m_mon.side_effect = KeyboardInterrupt + + with mock.patch('sys.argv', args): + + launcher() + + assert m_procconf.call_count == 1 + assert m_testcon.call_count == 1 + assert m_testenv.call_count == 1 + assert m_testapp.call_count == 1 + assert m_procjob.call_count == 1 + assert m_schedprep.call_count == 1 + assert m_stagup.call_count == 1 + assert m_sub.call_count == 1 + assert m_mon.call_count == 1 + assert m_del.call_count == 0 + assert m_stagdown.call_count == 2 + assert m_clean.call_count == 1 + + [email protected]('longbow.staging.cleanup') [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email 
protected]('longbow.scheduling.monitor') [email protected]('longbow.scheduling.submit') [email protected]('longbow.staging.stage_upstream') [email protected]('longbow.scheduling.prepare') [email protected]('longbow.applications.processjobs') [email protected]('longbow.applications.checkapp') [email protected]('longbow.scheduling.checkenv') [email protected]('longbow.shellwrappers.checkconnections') [email protected]('longbow.configuration.processconfigs') [email protected]('os.path.isfile') +def test_main_test11(m_isfile, m_procconf, m_testcon, m_testenv, m_testapp, + m_procjob, m_schedprep, m_stagup, m_sub, m_mon, m_del, + m_stagdown, m_clean): + + """Test the keyboard interrupt feature with complete jobs.""" + + m_isfile.return_value = True + + args = ["longbow", "--job", "testjob", "--resource", "big-machine", + "--debug"] + + m_procconf.side_effect = _completejobs + m_mon.side_effect = KeyboardInterrupt + + with mock.patch('sys.argv', args): + + launcher() + + assert m_procconf.call_count == 1 + assert m_testcon.call_count == 1 + assert m_testenv.call_count == 1 + assert m_testapp.call_count == 1 + assert m_procjob.call_count == 1 + assert m_schedprep.call_count == 1 + assert m_stagup.call_count == 1 + assert m_sub.call_count == 1 + assert m_mon.call_count == 1 + assert m_del.call_count == 0 + assert m_stagdown.call_count == 2 + assert m_clean.call_count == 1 + + [email protected]('longbow.staging.cleanup') [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.scheduling.monitor') [email protected]('longbow.scheduling.submit') [email protected]('longbow.staging.stage_upstream') [email protected]('longbow.scheduling.prepare') [email protected]('longbow.applications.processjobs') [email protected]('longbow.applications.checkapp') [email protected]('longbow.scheduling.checkenv') [email protected]('longbow.shellwrappers.checkconnections') [email 
protected]('longbow.configuration.processconfigs') [email protected]('os.path.isfile') +def test_main_test12(m_isfile, m_procconf, m_testcon, m_testenv, m_testapp, + m_procjob, m_schedprep, m_stagup, m_sub, m_mon, m_del, + m_stagdown, m_clean): + + """Test the disconnect feature with complete jobs.""" + + m_isfile.return_value = True + + args = ["longbow", "--job", "testjob", "--disconnect", "--debug"] + + m_procconf.side_effect = _configload + + with mock.patch('sys.argv', args): + + launcher() + + assert m_procconf.call_count == 1 + assert m_testcon.call_count == 1 + assert m_testenv.call_count == 1 + assert m_testapp.call_count == 1 + assert m_procjob.call_count == 1 + assert m_schedprep.call_count == 1 + assert m_stagup.call_count == 1 + assert m_sub.call_count == 1 + assert m_mon.call_count == 0 + assert m_del.call_count == 0 + assert m_stagdown.call_count == 0 + assert m_clean.call_count == 0 + + [email protected]('longbow.staging.cleanup') [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.scheduling.monitor') [email protected]('longbow.scheduling.submit') [email protected]('longbow.staging.stage_upstream') [email protected]('longbow.scheduling.prepare') [email protected]('longbow.applications.processjobs') [email protected]('longbow.applications.checkapp') [email protected]('longbow.scheduling.checkenv') [email protected]('longbow.shellwrappers.checkconnections') [email protected]('longbow.configuration.processconfigs') [email protected]('os.path.isfile') +def test_main_test13(m_isfile, m_procconf, m_testcon, m_testenv, m_testapp, + m_procjob, m_schedprep, m_stagup, m_sub, m_mon, m_del, + m_stagdown, m_clean): + + """Test the keyboard interrupt feature with complete jobs.""" + + m_isfile.return_value = True + + args = ["longbow", "--job", "testjob", "--resource", "big-machine", + "--debug"] + + m_procconf.side_effect = _configload + m_mon.side_effect = UpdateExit + + with 
mock.patch('sys.argv', args): + + launcher() + + assert m_procconf.call_count == 1 + assert m_testcon.call_count == 1 + assert m_testenv.call_count == 1 + assert m_testapp.call_count == 1 + assert m_procjob.call_count == 1 + assert m_schedprep.call_count == 1 + assert m_stagup.call_count == 1 + assert m_sub.call_count == 1 + assert m_mon.call_count == 1 + assert m_del.call_count == 0 + assert m_stagdown.call_count == 0 + assert m_clean.call_count == 0 diff --git a/tests/unit/entrypoints/test_longbow.py b/tests/unit/entrypoints/test_longbow.py index 5493f68..89b49e8 100644 --- a/tests/unit/entrypoints/test_longbow.py +++ b/tests/unit/entrypoints/test_longbow.py @@ -43,6 +43,8 @@ except ImportError: import mock +import pytest +import longbow.exceptions as exceptions from longbow.entrypoints import longbow @@ -69,7 +71,8 @@ def test_longbowmain_disconnect(m_procconf, m_testcon, m_testenv, m_testapp, "nochecks": False } - longbow(params) + with pytest.raises(exceptions.DisconnectException): + longbow({}, params) assert m_procconf.call_count == 1 assert m_testcon.call_count == 1 @@ -106,7 +109,7 @@ def test_longbowmain_testcalls1(m_procconf, m_testcon, m_testenv, m_testapp, "nochecks": False } - longbow(params) + longbow({}, params) assert m_procconf.call_count == 1 assert m_testcon.call_count == 1 @@ -144,7 +147,7 @@ def test_longbowmain_testcalls2(m_procconf, m_testcon, m_testenv, m_testapp, "nochecks": True } - longbow(params) + longbow({}, params) assert m_procconf.call_count == 1 assert m_testcon.call_count == 1 @@ -157,108 +160,3 @@ def test_longbowmain_testcalls2(m_procconf, m_testcon, m_testenv, m_testapp, assert m_mon.call_count == 1 assert m_clean.call_count == 1 - [email protected]('longbow.staging.cleanup') [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.scheduling.monitor') [email protected]('longbow.scheduling.submit') [email protected]('longbow.staging.stage_upstream') [email 
protected]('longbow.scheduling.prepare') [email protected]('longbow.applications.processjobs') [email protected]('longbow.applications.checkapp') [email protected]('longbow.scheduling.checkenv') [email protected]('longbow.shellwrappers.checkconnections') [email protected]('longbow.configuration.processconfigs') -def test_longbowmain_killrunning(m_procconf, m_testcon, m_testenv, m_testapp, - m_procjob, m_schedprep, m_stagup, m_sub, - m_mon, m_del, m_stagdown, m_clean): - - """ - Check that the correct function calls are made. - """ - - params = { - "hosts": "some/file", - "disconnect": False, - "nochecks": False - } - - m_procconf.return_value = { - "job1": { - "laststatus": "Running" - }, - "job2": { - "laststatus": "Running" - } - } - - m_mon.side_effect = KeyboardInterrupt - - longbow(params) - - assert m_procconf.call_count == 1 - assert m_testcon.call_count == 1 - assert m_testenv.call_count == 1 - assert m_testapp.call_count == 1 - assert m_procjob.call_count == 1 - assert m_schedprep.call_count == 1 - assert m_stagup.call_count == 1 - assert m_sub.call_count == 1 - assert m_mon.call_count == 1 - assert m_del.call_count == 2 - assert m_stagdown.call_count == 2 - assert m_clean.call_count == 1 - - [email protected]('longbow.staging.cleanup') [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.scheduling.monitor') [email protected]('longbow.scheduling.submit') [email protected]('longbow.staging.stage_upstream') [email protected]('longbow.scheduling.prepare') [email protected]('longbow.applications.processjobs') [email protected]('longbow.applications.checkapp') [email protected]('longbow.scheduling.checkenv') [email protected]('longbow.shellwrappers.checkconnections') [email protected]('longbow.configuration.processconfigs') -def test_longbowmain_killcomplete(m_procconf, m_testcon, m_testenv, m_testapp, - m_procjob, m_schedprep, m_stagup, m_sub, - m_mon, m_del, m_stagdown, m_clean): - - 
""" - Check that the correct function calls are made. - """ - - params = { - "hosts": "some/file", - "disconnect": False, - "nochecks": False - } - - m_procconf.return_value = { - "job1": { - "laststatus": "Complete" - }, - "job2": { - "laststatus": "Complete" - } - } - - m_mon.side_effect = KeyboardInterrupt - - longbow(params) - - assert m_procconf.call_count == 1 - assert m_testcon.call_count == 1 - assert m_testenv.call_count == 1 - assert m_testapp.call_count == 1 - assert m_procjob.call_count == 1 - assert m_schedprep.call_count == 1 - assert m_stagup.call_count == 1 - assert m_sub.call_count == 1 - assert m_mon.call_count == 1 - assert m_del.call_count == 0 - assert m_stagdown.call_count == 2 - assert m_clean.call_count == 1 diff --git a/tests/unit/entrypoints/test_recovery.py b/tests/unit/entrypoints/test_recovery.py index d7bf881..3a36404 100644 --- a/tests/unit/entrypoints/test_recovery.py +++ b/tests/unit/entrypoints/test_recovery.py @@ -60,12 +60,12 @@ def test_recovery_check(mock_file, mock_mon, mock_clean, mock_load): """ mock_file.return_value = True - mock_load.return_value = ("", "", "testjobs") + mock_load.return_value = ("", "", {"testjobs": {}}) - recovery("recovery.file") + recovery({}, "recovery.file") - assert mock_mon.call_args[0][0] == "testjobs" - assert mock_clean.call_args[0][0] == "testjobs" + assert mock_mon.call_args[0][0] == {"testjobs": {}} + assert mock_clean.call_args[0][0] == {"testjobs": {}} @mock.patch('longbow.staging.cleanup') @@ -83,84 +83,5 @@ def test_recovery_except(mock_isfile, mock_monitor, mock_cleanup): with pytest.raises(exceptions.RequiredinputError): - recovery("recovery.file") + recovery({}, "recovery.file") - [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.configuration.loadconfigs') [email protected]('longbow.staging.cleanup') [email protected]('longbow.scheduling.monitor') [email protected]('os.path.isfile') -def 
test_recovery_interupt(m_file, m_mon, m_clean, m_load, m_del, m_down): - - """ - Check that user interrupt doesn't result in unhandled exception. - """ - - m_file.return_value = True - m_mon.side_effect = KeyboardInterrupt - m_clean.return_value = None - m_load.return_value = ("", "", {"testjobs": {}}) - m_del.return_value = None - m_down.return_value = None - - recovery("recovery.file") - - assert m_clean.call_count == 1 - assert m_down.call_count == 0 - assert m_del.call_count == 0 - - [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.configuration.loadconfigs') [email protected]('longbow.staging.cleanup') [email protected]('longbow.scheduling.monitor') [email protected]('os.path.isfile') -def test_recovery_interupt2(m_file, m_mon, m_clean, m_load, m_del, m_down): - - """ - Check that user interrupt doesn't result in unhandled exception. Also test - the case where jobs are running. - """ - - m_file.return_value = True - m_mon.side_effect = KeyboardInterrupt - m_clean.return_value = None - m_load.return_value = ("", "", {"testjobs": {"laststatus": "Running"}}) - m_del.return_value = None - m_down.return_value = None - - recovery("recovery.file") - - assert m_clean.call_count == 1 - assert m_down.call_count == 1 - assert m_del.call_count == 1 - - [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling.delete') [email protected]('longbow.configuration.loadconfigs') [email protected]('longbow.staging.cleanup') [email protected]('longbow.scheduling.monitor') [email protected]('os.path.isfile') -def test_recovery_interupt3(m_file, m_mon, m_clean, m_load, m_del, m_down): - - """ - Check that user interrupt doesn't result in unhandled exception. Also test - the case where jobs are finished. 
- """ - - m_file.return_value = True - m_mon.side_effect = KeyboardInterrupt - m_clean.return_value = None - m_load.return_value = ("", "", {"testjobs": {"laststatus": "Complete"}}) - m_del.return_value = None - m_down.return_value = None - - recovery("recovery.file") - - assert m_clean.call_count == 1 - assert m_down.call_count == 1 - assert m_del.call_count == 0 diff --git a/tests/unit/entrypoints/test_update.py b/tests/unit/entrypoints/test_update.py new file mode 100644 index 0000000..ee65629 --- /dev/null +++ b/tests/unit/entrypoints/test_update.py @@ -0,0 +1,87 @@ +# BSD 3-Clause License +# +# Copyright (c) 2017, Science and Technology Facilities Council and +# The University of Nottingham +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +This testing module contains the tests for the recovery method within the +entrypoint module. +""" + +try: + + from unittest import mock + +except ImportError: + + import mock + +import pytest + +import longbow.exceptions as exceptions +from longbow.entrypoints import update + + [email protected]('longbow.staging.cleanup') [email protected]('longbow.configuration.loadconfigs') [email protected]('longbow.scheduling.monitor') [email protected]('os.path.isfile') +def test_update_check(mock_file, mock_mon, mock_load, m_clean): + + """ + Check that the correct function calls are made. + """ + + mock_file.return_value = True + mock_load.return_value = ("", "", { + "testparam": "test", "lbowconf": {"update": False}}) + + update({}, "update.file") + + params = mock_mon.call_args[0][0] + + assert params["testparam"] == "test" + assert params["lbowconf"]["update"] is True + + [email protected]('longbow.scheduling.monitor') [email protected]('os.path.isfile') +def test_recovery_except(mock_isfile, mock_monitor): + + """ + Check that exception is raised on bad file. 
+ """ + + mock_isfile.return_value = False + mock_monitor.return_value = None + + with pytest.raises(exceptions.RequiredinputError): + + update({}, "update.file") diff --git a/tests/unit/scheduling/test_checkwaitingjobs.py b/tests/unit/scheduling/test_checkwaitingjobs.py index f760780..36c33b6 100644 --- a/tests/unit/scheduling/test_checkwaitingjobs.py +++ b/tests/unit/scheduling/test_checkwaitingjobs.py @@ -46,7 +46,7 @@ except ImportError: import pytest import longbow.exceptions as exceptions -from longbow.scheduling import _checkwaitingjobs, QUEUEINFO +from longbow.scheduling import _checkwaitingjobs def addjobid(job): @@ -67,13 +67,16 @@ def test_checkwaitingjobs_none(mock_submit): jobs = { "jobone": { - "laststatus": "Running" + "laststatus": "Running", + "resource": "hpc-1" }, "jobtwo": { - "laststatus": "Finished" + "laststatus": "Finished", + "resource": "hpc-1" }, "jobthree": { - "laststatus": "Complete" + "laststatus": "Complete", + "resource": "hpc-1" } } @@ -91,6 +94,10 @@ def test_checkwaitingjobs_one(mock_submit): """ jobs = { + "lbowconf": { + "test-machine-queue-slots": 1, + "test-machine-queue-max": 2 + }, "jobone": { "laststatus": "Running", "resource": "test-machine", @@ -109,14 +116,11 @@ def test_checkwaitingjobs_one(mock_submit): } mock_submit.side_effect = addjobid - QUEUEINFO["test-machine"] = {} - QUEUEINFO["test-machine"]["queue-slots"] = 1 - QUEUEINFO["test-machine"]["queue-max"] = 2 _checkwaitingjobs(jobs, False) assert mock_submit.call_count == 1, "Should be submitting one job" - assert QUEUEINFO["test-machine"]["queue-slots"] == "2" + assert jobs["lbowconf"]["test-machine-queue-slots"] == "2" @mock.patch('longbow.schedulers.lsf.submit') @@ -128,6 +132,10 @@ def test_checkwaitingjobs_two(mock_submit): """ jobs = { + "lbowconf": { + "test-machine-queue-slots": 1, + "test-machine-queue-max": 8 + }, "jobone": { "laststatus": "Running", "resource": "test-machine", @@ -146,13 +154,11 @@ def test_checkwaitingjobs_two(mock_submit): } 
mock_submit.side_effect = addjobid - QUEUEINFO["test-machine"]["queue-slots"] = 1 - QUEUEINFO["test-machine"]["queue-max"] = 8 _checkwaitingjobs(jobs, False) assert mock_submit.call_count == 2, "Should be submitting two jobs" - assert QUEUEINFO["test-machine"]["queue-slots"] == "3" + assert jobs["lbowconf"]["test-machine-queue-slots"] == "3" @mock.patch('longbow.schedulers.lsf.submit') @@ -163,6 +169,10 @@ def test_checkwaitingjobs_except1(mock_submit): """ jobs = { + "lbowconf": { + "test-machine-queue-slots": 1, + "test-machine-queue-max": 8 + }, "jobone": { "laststatus": "Running", "resource": "test-machine", @@ -181,8 +191,6 @@ def test_checkwaitingjobs_except1(mock_submit): } mock_submit.side_effect = AttributeError - QUEUEINFO["test-machine"]["queue-slots"] = 1 - QUEUEINFO["test-machine"]["queue-max"] = 8 with pytest.raises(exceptions.PluginattributeError): @@ -198,6 +206,10 @@ def test_checkwaitingjobs_except2(mock_submit): """ jobs = { + "lbowconf": { + "test-machine-queue-slots": 1, + "test-machine-queue-max": 8 + }, "jobone": { "laststatus": "Running", "resource": "test-machine", @@ -216,8 +228,6 @@ def test_checkwaitingjobs_except2(mock_submit): } mock_submit.side_effect = exceptions.JobsubmitError - QUEUEINFO["test-machine"]["queue-slots"] = 1 - QUEUEINFO["test-machine"]["queue-max"] = 8 _checkwaitingjobs(jobs, False) @@ -235,6 +245,10 @@ def test_checkwaitingjobs_except3(mock_submit): """ jobs = { + "lbowconf": { + "test-machine-queue-slots": 1, + "test-machine-queue-max": 8 + }, "jobone": { "laststatus": "Running", "resource": "test-machine", @@ -253,8 +267,6 @@ def test_checkwaitingjobs_except3(mock_submit): } mock_submit.side_effect = exceptions.QueuemaxError - QUEUEINFO["test-machine"]["queue-slots"] = "1" - QUEUEINFO["test-machine"]["queue-max"] = "8" _checkwaitingjobs(jobs, False) diff --git a/tests/unit/scheduling/test_monitor.py b/tests/unit/scheduling/test_monitor.py index 2b2def8..513647d 100644 --- a/tests/unit/scheduling/test_monitor.py +++ 
b/tests/unit/scheduling/test_monitor.py @@ -46,7 +46,7 @@ except ImportError: import pytest import longbow.exceptions as exceptions -from longbow.scheduling import monitor, QUEUEINFO +from longbow.scheduling import monitor def jobstatus(jobs, _): @@ -55,13 +55,13 @@ def jobstatus(jobs, _): Change status of the job """ - for job in jobs: + for job in [a for a in jobs if "lbowconf" not in a]: if jobs[job]["laststatus"] == "Queued": jobs[job]["laststatus"] = "Running" - if jobs[job]["laststatus"] == "Running": + elif jobs[job]["laststatus"] == "Running": jobs[job]["laststatus"] = "Finished" @@ -78,17 +78,16 @@ def test_monitor_testpollfrequency(mock_init, mock_poll, mock_wait): import time jobs = { + "lbowconf": { + "recoveryfile": "recovery-YYMMDD-HHMMSS", + "hpc1-queue-slots": 1, + "hpc1-queue-max": 2 + }, "jobone": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Running", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Running" } } - QUEUEINFO["hpc1"] = {} - QUEUEINFO["hpc1"]["queue-slots"] = "1" - QUEUEINFO["hpc1"]["queue-max"] = "2" mock_init.return_value = 0, 2 mock_poll.return_value = False @@ -120,17 +119,16 @@ def test_monitor_teststagefreq(mock_init, mock_poll, mock_wait, mock_down): import time jobs = { + "lbowconf": { + "recoveryfile": "recovery-YYMMDD-HHMMSS", + "hpc1-queue-slots": 1, + "hpc1-queue-max": 2 + }, "jobone": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Running", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Running" } } - QUEUEINFO["hpc1"] = {} - QUEUEINFO["hpc1"]["queue-slots"] = "1" - QUEUEINFO["hpc1"]["queue-max"] = "2" mock_init.return_value = 1, 1 mock_poll.return_value = False @@ -164,33 +162,25 @@ def test_monitor_complete1(mock_init, mock_poll, mock_wait, mock_down, """ jobs = { + "lbowconf": { + "recoveryfile": "recovery-YYMMDD-HHMMSS", + "hpc1-queue-slots": 1, + "hpc1-queue-max": 2 + }, "jobone": { "resource": "hpc1", - "queue-max": "0", - 
"queue-slots": "0", - "laststatus": "Finished", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Finished" }, "jobtwo": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Complete", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Complete" }, "jobthree": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Submit Error", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Submit Error" } } - QUEUEINFO["hpc1"] = {} - QUEUEINFO["hpc1"]["queue-slots"] = "1" - QUEUEINFO["hpc1"]["queue-max"] = "2" - mock_init.return_value = 0, 1 mock_poll.return_value = False mock_wait.return_value = False @@ -218,47 +208,33 @@ def test_monitor_complete2(mock_init, mock_poll, mock_wait, mock_down, """ jobs = { + "lbowconf": { + "recoveryfile": "recovery-YYMMDD-HHMMSS", + "hpc1-queue-slots": 1, + "hpc1-queue-max": 2 + }, "jobone": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Finished", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Finished" }, "jobtwo": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Complete", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Complete" }, "jobthree": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Submit Error", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Submit Error" }, "jobfour": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Queued", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Queued" }, "jobfive": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Running", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Running" } } - QUEUEINFO["hpc1"] = {} - QUEUEINFO["hpc1"]["queue-slots"] = "1" - QUEUEINFO["hpc1"]["queue-max"] = "2" - mock_init.return_value = 0, 1 mock_poll.return_value = False mock_poll.side_effect = jobstatus @@ -290,47 
+266,33 @@ def test_monitor_run1(mock_init, mock_poll, mock_wait, mock_down, """ jobs = { + "lbowconf": { + "recoveryfile": "recovery-YYMMDD-HHMMSS", + "hpc1-queue-slots": 1, + "hpc1-queue-max": 2 + }, "jobone": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Running", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Running" }, "jobtwo": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Running", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Running" }, "jobthree": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Queued", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Queued" }, "jobfour": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Queued", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Queued" }, "jobfive": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Queued", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Queued" } } - QUEUEINFO["hpc1"] = {} - QUEUEINFO["hpc1"]["queue-slots"] = "1" - QUEUEINFO["hpc1"]["queue-max"] = "2" - mock_init.return_value = 0, 1 mock_poll.return_value = False mock_poll.side_effect = jobstatus @@ -363,31 +325,24 @@ def test_monitor_except(mock_init, mock_poll, mock_wait, mock_down, """ jobs = { + "lbowconf": { + "recoveryfile": "recovery-YYMMDD-HHMMSS", + "hpc1-queue-slots": 1, + "hpc1-queue-max": 2 + }, "jobone": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Finished", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Finished" }, "jobtwo": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Complete", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": "Complete" }, "jobthree": { "resource": "hpc1", - "queue-max": "0", - "queue-slots": "0", - "laststatus": "Submit Error", - "recoveryfile": "recovery-YYMMDD-HHMMSS" + "laststatus": 
"Submit Error" } } - QUEUEINFO["hpc1"] = {} - QUEUEINFO["hpc1"]["queue-slots"] = "1" - QUEUEINFO["hpc1"]["queue-max"] = "2" mock_init.return_value = 0, 1 mock_poll.return_value = False @@ -399,3 +354,64 @@ def test_monitor_except(mock_init, mock_poll, mock_wait, mock_down, assert jobs["jobone"]["laststatus"] == "Complete" assert mock_save.call_count == 1 + + [email protected]('longbow.configuration.saveini') [email protected]('longbow.staging.stage_downstream') [email protected]('longbow.scheduling._checkwaitingjobs') [email protected]('longbow.scheduling._polljobs') [email protected]('longbow.scheduling._monitorinitialise') +def test_monitor_update(mock_init, mock_poll, mock_wait, mock_down, mock_save): + + """ + Test that when all jobs complete the method exits. + """ + + jobs = { + "lbowconf": { + "update": True, + "recoveryfile": "recovery-YYMMDD-HHMMSS", + "hpc1-queue-slots": 2, + "hpc1-queue-max": 8 + }, + "jobone": { + "resource": "hpc1", + "laststatus": "Running" + }, + "jobtwo": { + "resource": "hpc1", + "laststatus": "Running" + }, + "jobthree": { + "resource": "hpc1", + "laststatus": "Queued" + }, + "jobfour": { + "resource": "hpc1", + "laststatus": "Queued" + }, + "jobfive": { + "resource": "hpc1", + "laststatus": "Queued" + } + } + + mock_init.return_value = 0, 1 + mock_poll.return_value = True + mock_poll.side_effect = jobstatus + mock_wait.return_value = True + + with pytest.raises(exceptions.UpdateExit): + + monitor(jobs) + + assert jobs["lbowconf"]["update"] is False + assert jobs["jobone"]["laststatus"] == "Finished" + assert jobs["jobtwo"]["laststatus"] == "Finished" + assert jobs["jobthree"]["laststatus"] == "Running" + assert jobs["jobfour"]["laststatus"] == "Running" + assert jobs["jobfive"]["laststatus"] == "Running" + assert mock_poll.call_count == 1 + assert mock_wait.call_count == 1 + assert mock_down.call_count == 0 + assert mock_save.call_count == 1 diff --git a/tests/unit/scheduling/test_polljobs.py 
b/tests/unit/scheduling/test_polljobs.py index cf0e30d..582e122 100644 --- a/tests/unit/scheduling/test_polljobs.py +++ b/tests/unit/scheduling/test_polljobs.py @@ -46,7 +46,7 @@ except ImportError: import pytest import longbow.exceptions as exceptions -from longbow.scheduling import _polljobs, QUEUEINFO +from longbow.scheduling import _polljobs @mock.patch('longbow.schedulers.lsf.status') @@ -106,6 +106,10 @@ def test_polljobs_finished(mock_status): """ jobs = { + "lbowconf": { + "test-machine-queue-slots": 2, + "test-machine-queue-max": 4 + }, "jobone": { "resource": "test-machine", "laststatus": "Running", @@ -144,7 +148,6 @@ def test_polljobs_finished(mock_status): } } - QUEUEINFO["test-machine"]["queue-slots"] = "2" mock_status.return_value = "Finished" _polljobs(jobs, False) @@ -152,7 +155,7 @@ def test_polljobs_finished(mock_status): "Should only be polling running and queued jobs" assert jobs["jobone"]["laststatus"] == "Finished" assert jobs["jobtwo"]["laststatus"] == "Finished" - assert QUEUEINFO["test-machine"]["queue-slots"] == "0" + assert jobs["lbowconf"]["test-machine-queue-slots"] == "0" @mock.patch('longbow.schedulers.lsf.status') diff --git a/tests/unit/scheduling/test_prepare.py b/tests/unit/scheduling/test_prepare.py index 8a69f3e..dff69d5 100644 --- a/tests/unit/scheduling/test_prepare.py +++ b/tests/unit/scheduling/test_prepare.py @@ -141,8 +141,7 @@ def test_prepare_ownscript(mock_prepare): "resource": "test-machine", "scheduler": "LSF", "jobid": "test456", - "subfile": "test.lsf", - "upload-include": "file1, file2, file3" + "subfile": "test.lsf" } } @@ -150,4 +149,3 @@ def test_prepare_ownscript(mock_prepare): assert mock_prepare.call_count == 0, \ "This method shouldn't be called at all in this case." 
- assert jobs["job-one"]["upload-include"] == "file1, file2, file3, test.lsf" diff --git a/tests/unit/scheduling/test_submit.py b/tests/unit/scheduling/test_submit.py index f12b6f0..aaac87b 100644 --- a/tests/unit/scheduling/test_submit.py +++ b/tests/unit/scheduling/test_submit.py @@ -46,7 +46,7 @@ except ImportError: import pytest import longbow.exceptions as exceptions -from longbow.scheduling import submit, QUEUEINFO +from longbow.scheduling import submit @mock.patch('longbow.schedulers.lsf.submit') @@ -58,6 +58,7 @@ def test_submit_single(mock_isdir, mock_submit): """ jobs = { + "lbowconf": {}, "job-one": { "resource": "test-machine", "scheduler": "LSF", @@ -83,6 +84,7 @@ def test_submit_multiplesame(mock_isdir, mock_lsf): """ jobs = { + "lbowconf": {}, "job-one": { "resource": "test-machine", "scheduler": "LSF", @@ -119,6 +121,7 @@ def test_submit_multiplediff(mock_isdir, mock_lsf, mock_pbs, mock_slurm): """ jobs = { + "lbowconf": {}, "job-one": { "resource": "lsf-machine", "scheduler": "LSF", @@ -158,11 +161,13 @@ def test_submit_filewrite(mock_isdir, mock_submit, mock_savini): """ jobs = { + "lbowconf": { + "recoveryfile": "recovery-YYMMDD-HHMMSS" + }, "job-one": { "resource": "test-machine", "scheduler": "LSF", "jobid": "test456", - "recoveryfile": "recovery-YYMMDD-HHMMSS" } } @@ -184,11 +189,13 @@ def test_submit_fileuninit(mock_isdir, mock_submit, mock_savini): """ jobs = { + "lbowconf": { + "recoveryfile": "" + }, "job-one": { "resource": "test-machine", "scheduler": "LSF", "jobid": "test456", - "recoveryfile": "" } } @@ -210,11 +217,13 @@ def test_submit_fileexcept1(mock_isdir, mock_submit, mock_savini): """ jobs = { + "lbowconf": { + "recoveryfile": "recovery-YYMMDD-HHMMSS" + }, "job-one": { "resource": "test-machine", "scheduler": "LSF", "jobid": "test456", - "recoveryfile": "recovery-YYMMDD-HHMMSS" } } @@ -235,11 +244,13 @@ def test_submit_fileexcept2(mock_isdir, mock_submit, mock_savini): """ jobs = { + "lbowconf": { + "recoveryfile": 
"recovery-YYMMDD-HHMMSS" + }, "job-one": { "resource": "test-machine", "scheduler": "LSF", "jobid": "test456", - "recoveryfile": "recovery-YYMMDD-HHMMSS" } } @@ -260,6 +271,7 @@ def test_submit_attrexcept(mock_isdir, mock_submit, mock_savini): """ jobs = { + "lbowconf": {}, "job-one": { "resource": "test-machine", "scheduler": "LSF", @@ -286,6 +298,7 @@ def test_submit_submitexcept(mock_isdir, mock_submit, mock_savini): """ jobs = { + "lbowconf": {}, "job-one": { "resource": "test-machine", "scheduler": "LSF", @@ -313,6 +326,7 @@ def test_submit_queueexcept(mock_isdir, mock_submit, mock_savini): """ jobs = { + "lbowconf": {}, "job-one": { "resource": "test-machine", "scheduler": "LSF", @@ -339,6 +353,10 @@ def test_submit_queueinfo(mock_isdir, mock_submit, mock_savini): """ jobs = { + "lbowconf": { + "test-machine-queue-slots": 0, + "test-machine-queue-max": 0 + }, "job-one": { "resource": "test-machine", "scheduler": "LSF", @@ -355,8 +373,6 @@ def test_submit_queueinfo(mock_isdir, mock_submit, mock_savini): "jobid": "test789" } } - QUEUEINFO["test-machine"]["queue-slots"] = "0" - QUEUEINFO["test-machine"]["queue-max"] = "0" mock_isdir.return_value = False mock_savini.return_value = None @@ -364,5 +380,5 @@ def test_submit_queueinfo(mock_isdir, mock_submit, mock_savini): submit(jobs) - assert QUEUEINFO["test-machine"]["queue-slots"] == "3" - assert QUEUEINFO["test-machine"]["queue-max"] == "3" + assert jobs["lbowconf"]["test-machine-queue-slots"] == "3" + assert jobs["lbowconf"]["test-machine-queue-max"] == "3" diff --git a/tests/unit/staging/test_cleanup.py b/tests/unit/staging/test_cleanup.py index 76d6595..d386c1f 100644 --- a/tests/unit/staging/test_cleanup.py +++ b/tests/unit/staging/test_cleanup.py @@ -56,10 +56,12 @@ def test_cleanup_single(mock_delete, mock_list): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" } } @@ -80,20 +82,20 @@ 
def test_cleanup_multiple(mock_delete, mock_list): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" }, "jobtwo": { "destdir": "/path/to/jobtwo12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" }, "jobthree": { "destdir": "/path/to/jobthree12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" } } @@ -114,10 +116,12 @@ def test_cleanup_params(mock_delete, mock_list): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" } } @@ -139,10 +143,12 @@ def test_cleanup_nodelete(mock_delete, mock_list): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/jobone12484", - "recoveryfile": "" } } @@ -164,10 +170,12 @@ def test_cleanup_excepttest1(mock_delete, mock_list): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" } } @@ -187,10 +195,12 @@ def test_cleanup_excepttest2(mock_delete, mock_list): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" } } @@ -210,10 +220,12 @@ def test_cleanup_excepttest3(mock_delete, mock_list): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" } } @@ -233,10 +245,12 @@ def test_cleanup_excepttest4(mock_delete, mock_list): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" } } @@ -257,10 +271,12 @@ def 
test_cleanup_recoveryfilerm1(m_delete, m_list, m_isfile, m_remove): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "recovery-YYMMDD-HHMMSS" } } @@ -284,10 +300,12 @@ def test_cleanup_recoveryfilerm2(m_delete, m_list, m_isfile, m_remove): """ jobs = { + "lbowconf": { + "recoveryfile": "" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "" } } @@ -311,10 +329,12 @@ def test_cleanup_recoveryfilerm3(m_delete, m_list, m_isfile, m_remove): """ jobs = { + "lbowconf": { + "recoveryfile": "rec.file" + }, "jobone": { "destdir": "/path/to/jobone12484", "remoteworkdir": "/path/to/local/dir", - "recoveryfile": "recovery-YYMMDD-HHMMSS" } }
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 7 }
.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/HECBioSim/Longbow.git@7e8d4e78e2e0590083c8a5630c24d175ba49ce48#egg=Longbow more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: Longbow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/Longbow
[ "tests/unit/configuration/test_saveini.py::test_saveini_test1", "tests/unit/configuration/test_saveini.py::test_saveini_test2", "tests/unit/entrypoints/test_launcher.py::test_main_test1", "tests/unit/entrypoints/test_launcher.py::test_main_test2", "tests/unit/entrypoints/test_launcher.py::test_main_test3", "tests/unit/entrypoints/test_launcher.py::test_main_test4", "tests/unit/entrypoints/test_launcher.py::test_main_test5", "tests/unit/entrypoints/test_launcher.py::test_main_test6", "tests/unit/entrypoints/test_launcher.py::test_main_test7", "tests/unit/entrypoints/test_launcher.py::test_main_test8", "tests/unit/entrypoints/test_launcher.py::test_main_test9", "tests/unit/entrypoints/test_launcher.py::test_main_test10", "tests/unit/entrypoints/test_launcher.py::test_main_test11", "tests/unit/entrypoints/test_launcher.py::test_main_test12", "tests/unit/entrypoints/test_launcher.py::test_main_test13", "tests/unit/entrypoints/test_longbow.py::test_longbowmain_disconnect", "tests/unit/entrypoints/test_longbow.py::test_longbowmain_testcalls1", "tests/unit/entrypoints/test_longbow.py::test_longbowmain_testcalls2", "tests/unit/entrypoints/test_recovery.py::test_recovery_check", "tests/unit/entrypoints/test_recovery.py::test_recovery_except", "tests/unit/entrypoints/test_update.py::test_update_check", "tests/unit/entrypoints/test_update.py::test_recovery_except", "tests/unit/scheduling/test_checkwaitingjobs.py::test_checkwaitingjobs_none", "tests/unit/scheduling/test_checkwaitingjobs.py::test_checkwaitingjobs_one", "tests/unit/scheduling/test_checkwaitingjobs.py::test_checkwaitingjobs_two", "tests/unit/scheduling/test_checkwaitingjobs.py::test_checkwaitingjobs_except1", "tests/unit/scheduling/test_checkwaitingjobs.py::test_checkwaitingjobs_except2", "tests/unit/scheduling/test_checkwaitingjobs.py::test_checkwaitingjobs_except3", "tests/unit/scheduling/test_monitor.py::test_monitor_testpollfrequency", "tests/unit/scheduling/test_monitor.py::test_monitor_teststagefreq", 
"tests/unit/scheduling/test_monitor.py::test_monitor_complete1", "tests/unit/scheduling/test_monitor.py::test_monitor_complete2", "tests/unit/scheduling/test_monitor.py::test_monitor_run1", "tests/unit/scheduling/test_monitor.py::test_monitor_except", "tests/unit/scheduling/test_monitor.py::test_monitor_update", "tests/unit/scheduling/test_polljobs.py::test_polljobs_callcount", "tests/unit/scheduling/test_polljobs.py::test_polljobs_finished", "tests/unit/scheduling/test_polljobs.py::test_polljobs_except", "tests/unit/scheduling/test_prepare.py::test_prepare_single", "tests/unit/scheduling/test_prepare.py::test_prepare_multiple", "tests/unit/scheduling/test_prepare.py::test_prepare_attrexcept", "tests/unit/scheduling/test_prepare.py::test_prepare_ownscript", "tests/unit/scheduling/test_submit.py::test_submit_single", "tests/unit/scheduling/test_submit.py::test_submit_multiplesame", "tests/unit/scheduling/test_submit.py::test_submit_multiplediff", "tests/unit/scheduling/test_submit.py::test_submit_filewrite", "tests/unit/scheduling/test_submit.py::test_submit_fileuninit", "tests/unit/scheduling/test_submit.py::test_submit_fileexcept1", "tests/unit/scheduling/test_submit.py::test_submit_fileexcept2", "tests/unit/scheduling/test_submit.py::test_submit_attrexcept", "tests/unit/scheduling/test_submit.py::test_submit_submitexcept", "tests/unit/scheduling/test_submit.py::test_submit_queueexcept", "tests/unit/scheduling/test_submit.py::test_submit_queueinfo", "tests/unit/staging/test_cleanup.py::test_cleanup_single", "tests/unit/staging/test_cleanup.py::test_cleanup_multiple", "tests/unit/staging/test_cleanup.py::test_cleanup_params", "tests/unit/staging/test_cleanup.py::test_cleanup_nodelete", "tests/unit/staging/test_cleanup.py::test_cleanup_excepttest1", "tests/unit/staging/test_cleanup.py::test_cleanup_excepttest2", "tests/unit/staging/test_cleanup.py::test_cleanup_excepttest3", "tests/unit/staging/test_cleanup.py::test_cleanup_excepttest4", 
"tests/unit/staging/test_cleanup.py::test_cleanup_recoveryfilerm1", "tests/unit/staging/test_cleanup.py::test_cleanup_recoveryfilerm2", "tests/unit/staging/test_cleanup.py::test_cleanup_recoveryfilerm3" ]
[]
[]
[]
BSD 3-Clause License
2,774
[ "longbow/scheduling.py", "longbow/entrypoints.py", "longbow/configuration.py", "longbow/applications.py", "longbow/exceptions.py", "longbow/staging.py", "longbow/shellwrappers.py" ]
[ "longbow/scheduling.py", "longbow/entrypoints.py", "longbow/configuration.py", "longbow/applications.py", "longbow/exceptions.py", "longbow/staging.py", "longbow/shellwrappers.py" ]
airspeed-velocity__asv-674
1ca6450ea2124967ce77379a0e5fb7dfb66a05cb
2018-07-13 20:10:07
a42330248214dbd70595f4dff8b549d1f6c58db4
diff --git a/asv/benchmarks.py b/asv/benchmarks.py index 40379f9..8dacfbb 100644 --- a/asv/benchmarks.py +++ b/asv/benchmarks.py @@ -19,6 +19,7 @@ import six from .console import log, truncate_left from . import util from . import statistics +from .repo import NoSuchNameError WIN = (os.name == "nt") @@ -410,7 +411,11 @@ class Benchmarks(dict): # discovery usually succeeds commit_hashes = list(commit_hashes) for branch in conf.branches: - branch_hash = repo.get_hash_from_name(branch) + try: + branch_hash = repo.get_hash_from_name(branch) + except NoSuchNameError: + continue + if branch_hash not in commit_hashes: commit_hashes.append(branch_hash) diff --git a/asv/plugins/git.py b/asv/plugins/git.py index bc9eb4d..04ee9d5 100644 --- a/asv/plugins/git.py +++ b/asv/plugins/git.py @@ -12,7 +12,7 @@ import os import re from ..console import log -from ..repo import Repo +from ..repo import Repo, NoSuchNameError from .. import util @@ -127,8 +127,15 @@ class Git(Repo): def get_hash_from_name(self, name): if name is None: name = self.get_branch_name() - return self._run_git(['rev-parse', name], - dots=False).strip().split()[0] + + try: + return self._run_git(['rev-parse', name], + dots=False).strip().split()[0] + except util.ProcessError as err: + if err.stdout.strip() == name: + # Name does not exist + raise NoSuchNameError(name) + raise def get_hash_from_parent(self, name): return self.get_hash_from_name(name + '^') diff --git a/asv/plugins/mercurial.py b/asv/plugins/mercurial.py index e723e51..20cb626 100644 --- a/asv/plugins/mercurial.py +++ b/asv/plugins/mercurial.py @@ -17,7 +17,7 @@ except ImportError as exc: hglib = None from ..console import log -from ..repo import Repo +from ..repo import Repo, NoSuchNameError from .. 
import util @@ -142,7 +142,12 @@ class Hg(Repo): def get_hash_from_name(self, name): if name is None: name = self.get_branch_name() - return self._decode(self._repo.log(self._encode(name))[0].node) + try: + return self._decode(self._repo.log(self._encode(name))[0].node) + except hglib.error.CommandError as err: + if b'unknown revision' in err.err: + raise NoSuchNameError(name) + raise def get_hash_from_parent(self, name): return self.get_hash_from_name('p1({0})'.format(name)) diff --git a/asv/repo.py b/asv/repo.py index 8b7f98a..43e2f16 100644 --- a/asv/repo.py +++ b/asv/repo.py @@ -8,6 +8,13 @@ from __future__ import (absolute_import, division, print_function, from . import util +class NoSuchNameError(RuntimeError): + """ + Exception raised if requested branch or commit does not exist. + """ + pass + + class Repo(object): """ Base class for repository handlers.
`git rev-parse master` can fail I'm trying to run benchmarks on Travis-CI. Unfortunately, "asv run" uses "git rev-parse master" to discover benchmarks, which fails as "master" doesn't necessary exist: https://travis-ci.org/pitrou/arrow/builds/364763507#L866 The command being run is: ``` asv run --no-pull --show-stderr --quick ${TRAVIS_COMMIT}^! ```
airspeed-velocity/asv
diff --git a/test/test_benchmarks.py b/test/test_benchmarks.py index 0a35fcb..4fa7a18 100644 --- a/test/test_benchmarks.py +++ b/test/test_benchmarks.py @@ -52,6 +52,7 @@ def test_find_benchmarks(tmpdir): d['env_dir'] = "env" d['benchmark_dir'] = 'benchmark' d['repo'] = tools.generate_test_repo(tmpdir, [0]).path + d['branches'] = ["master", "some-missing-branch"] # missing branches ignored conf = config.Config.from_json(d) repo = get_repo(conf) diff --git a/test/test_repo.py b/test/test_repo.py index 88a498c..b10cb35 100644 --- a/test/test_repo.py +++ b/test/test_repo.py @@ -434,3 +434,35 @@ def test_root_ceiling(dvcs_type, tmpdir): # parent repository with pytest.raises(Exception): r.checkout(workcopy_dir, commit2) + + [email protected]('dvcs_type', [ + "git", + pytest.mark.skipif(hglib is None, reason="needs hglib")("hg") +]) +def test_no_such_name_error(dvcs_type, tmpdir): + tmpdir = six.text_type(tmpdir) + dvcs = tools.generate_test_repo(tmpdir, values=[0], dvcs_type=dvcs_type) + + conf = config.Config() + conf.branches = [] + conf.dvcs = dvcs_type + conf.project = "project" + conf.repo = dvcs.path + + r = repo.get_repo(conf) + + # Check that NoSuchNameError error gets raised correctly + assert r.get_hash_from_name(None) == dvcs.get_hash(r._default_branch) + with pytest.raises(repo.NoSuchNameError): + r.get_hash_from_name("badbranch") + + if dvcs_type == "git": + # Corrupted repository/etc should not give NoSuchNameError + util.long_path_rmtree(join(dvcs.path, ".git")) + with pytest.raises(Exception) as excinfo: + r.get_hash_from_name(None) + assert excinfo.type not in (AssertionError, repo.NoSuchNameError) + elif dvcs_type == "hg": + # hglib seems to do some caching, so this doesn't work + pass
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 4 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-rerunfailures" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "pip_requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 -e git+https://github.com/airspeed-velocity/asv.git@1ca6450ea2124967ce77379a0e5fb7dfb66a05cb#egg=asv attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 docutils==0.18.1 execnet==1.9.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-rerunfailures==10.3 pytest-xdist==3.0.2 pytz==2025.2 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-bootstrap-theme==0.8.1 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: asv channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - docutils==0.18.1 - execnet==1.9.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-rerunfailures==10.3 - pytest-xdist==3.0.2 - pytz==2025.2 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-bootstrap-theme==0.8.1 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/asv
[ "test/test_repo.py::test_no_such_name_error[git]" ]
[ "test/test_benchmarks.py::test_find_benchmarks", "test/test_benchmarks.py::test_find_benchmarks_cwd_imports", "test/test_benchmarks.py::test_quick", "test/test_benchmarks.py::test_code_extraction", "test/test_repo.py::test_git_submodule", "test/test_repo.py::test_root_ceiling[dvcs_type1]", "test/test_repo.py::test_no_such_name_error[dvcs_type1]" ]
[ "test/test_benchmarks.py::test_invalid_benchmark_tree", "test/test_benchmarks.py::test_table_formatting", "test/test_repo.py::test_repo_git", "test/test_repo.py::test_repo_git_annotated_tag_date", "test/test_repo.py::test_get_branch_commits[git]", "test/test_repo.py::test_get_new_branch_commits[git-all]", "test/test_repo.py::test_get_new_branch_commits[git-new]", "test/test_repo.py::test_get_new_branch_commits[git-no-new]", "test/test_repo.py::test_get_new_branch_commits[git-new-branch-added-in-config]", "test/test_repo.py::test_root_ceiling[git]" ]
[]
BSD 3-Clause "New" or "Revised" License
2,775
[ "asv/plugins/mercurial.py", "asv/benchmarks.py", "asv/plugins/git.py", "asv/repo.py" ]
[ "asv/plugins/mercurial.py", "asv/benchmarks.py", "asv/plugins/git.py", "asv/repo.py" ]
sigmavirus24__github3.py-861
bb40e7e89441f9d2c084e4a26fb4a2a9a0a4c305
2018-07-13 22:02:07
b8e7aa8eb221cd1eec7a8bc002b75de8098dc77a
diff --git a/github3/repos/repo.py b/github3/repos/repo.py index 68479d83..3659ad60 100644 --- a/github3/repos/repo.py +++ b/github3/repos/repo.py @@ -2352,11 +2352,13 @@ class _Repository(models.GitHubCore): json = self._json(self._get(url, headers=self.PREVIEW_HEADERS), 200) return self._instance_or_null(topics.Topics, json) - def tree(self, sha): + def tree(self, sha, recursive=False): """Get a tree. :param str sha: (required), sha of the object for this tree + :param bool recursive: + (optional), whether to fetch the tree recursively :returns: the tree :rtype: @@ -2365,7 +2367,8 @@ class _Repository(models.GitHubCore): json = None if sha: url = self._build_url('git', 'trees', sha, base_url=self._api) - json = self._json(self._get(url), 200) + params = {'recursive': 1} if recursive else None + json = self._json(self._get(url, params=params), 200) return self._instance_or_null(git.Tree, json) def weekly_commit_count(self):
Add recursive option to `repository.tree(sha, recursive=False)` # Overview GitHub Tree API allow to get tree recursively - https://developer.github.com/v3/git/trees/#get-a-tree-recursively # Ideas It should be pretty simple for now it works even like this (a hack): ```python repository.tree('sha?recursive=1') ``` <bountysource-plugin> --- Want to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/39848100-add-recursive-option-to-repository-tree-sha-recursive-false?utm_campaign=plugin&utm_content=tracker%2F183477&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F183477&utm_medium=issues&utm_source=github). </bountysource-plugin>
sigmavirus24/github3.py
diff --git a/tests/cassettes/Repository_tree_recursive.json b/tests/cassettes/Repository_tree_recursive.json new file mode 100644 index 00000000..a839f788 --- /dev/null +++ b/tests/cassettes/Repository_tree_recursive.json @@ -0,0 +1,1 @@ +{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"]}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA62YXW/iOBSG/0qUm7lYSoD0gyJVs5Wm6nSlzszOdndHvUEmMcStE0e2AwNR//u+tgMEpIG23ou2kPo8fn3sc3KO65Cl4Si+6Pcu+v1OWIiUjs2T8P7TzeIr/4Mnt5cr8uP7PCmeV19WNwv8/AwxkOQUo2ZMZ9Uk7pZLPJtWnI+bfyg2y8mcyUoNTqOdUWJRUBmO6pCLGSvAaA8Fxcw+OO0N496unD/P//nxhSdPd737T7PV/fXVFQaTOdFEjivJAcq0LtUoitxDFXfdvJWiMhGFpoXuJiKPqsjhP86vToGYyQZil40He7CSNRxnDJiK9iRnOud7GtzU1mRv8FRwLhag7Ks+NlG0sTS+thRWzN5JgWUdCZ1ROA9LejGOYEq/XZS1qrHFSuPgGI7CjkiavllYYwdZ5oS81JGkpbDAaqISyUrNRPF2gTvWoAk5IwVbkffRYK0AMdLeLsVawZrOcRbfbu7M6qiUbE6SpXGNpAllczj7ncg9exD1sjSB/TcOhXE903RM0twE6pRwRV86oZ1eY5B90EHYvfb07yaClG52FRN+Zp3g7kMekICziSRyGUyFDBjCVpJE47gGC+Sa4PZOf64mH1Tw/eavh+D62x2GwAbxDZ8yuDUgRRpQOROFyFkSLMiyG9wFCyGfA1EE35Y6w59B96ITxN1T8+usY03i7jnWizmfNws7mAjsXu7E9u7qDOnIFh9EINoBgKRnuvTiGPs6wu8mPBNkDjIRkmhxLAcdFrgDqqP2V3M0NSW5l3ALACgTws+TFgAQU6qir4qUwwu3HBWtw7Go8onLoK8JwsNoR4BWovDaKCj18uAGUkfrJI/QKpLMD7tm1JH7ZHebzLykGntgJlxMvDh470YWUkcqI+61pse+6gzVMHagkk69pRrGBqql535bmQayQeKdqrH1XjrXjKhuPMpJMavIzI+6gWDXzZt/RlZHa6LDsbOlAGkKPskmlX+S23KMUleKIN79XLrFbKG2vjlcNR1xQKtOsi7Ic3aszDhMbBA7x/5/wJpzuo82349XRcflGkYdbXOyS/oN3ce7TdZf62zP0XQXXkdizYjq30qiM5O5MFVJJPUR3SCiekJQu3W73TqjxFbpOWokvwh2BKCITDIUoT466zUDVU9OtC3+p0ZmimaAC5J6+XYDAdBto49WR2jvf4nG10ugBbSJOeNUaVF47tCG0mYXQrMpS17TAB0Otx1Q/VGxIqEdwnkHp1azhOEco243u4iCk/p5yBGwDNw7uMaHUxxpL69L6hh15BrXlJZcLL2zUAtjglhSNE
vpmGg0OYNef3DSi0/68UP/cnQ2HJ3FjxhTlenOmOFJ78KMGfRGp/EoPjdjykplLcx6SL8/iofNEKTV5lzjE+5BfnEH0epTzM0G2EplW8Pft2ajX9ziNGYJxwHdi6TXzznff9cdN4XUTOS0RO3Ruu5xdnEXjk5xoZGKRHWZMMtiK4w7G/R7uEpq1RiJqArsxrB31gkXRKMcxtu8/XBdm2AS1zCaiYkau8gPR1pWpu/Fk1KKJ5po1X62zTatgQv2zLYds7E0BdTmiesWGw2DyyHyM5NSNDdbBdLDJs3ilqppvEVJi0bTWv4F7vA4S2ihsPLa9I5Yg73mwRKaq7mvzVdVpj/tXZ/D227XfWzfAibnD7f86fHfs9Xjw/VViP7fNaYjq7IlIRyZydf+bNyb0impuB67RgFaUnQ4XJRGDdWmK18rt7h2edU4I45f/gNOlGsFpBQAAA==", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Last-Modified": ["Fri, 13 Jul 2018 20:43:36 GMT"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["E1EC:7DB5:6324ACB:D9BA5FB:5B491D6D"], "ETag": ["W/\"67946538aee98117443868c5d535c21d\""], "Date": ["Fri, 13 Jul 2018 21:45:17 GMT"], "X-RateLimit-Remaining": ["50"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.058705"], "Vary": ["Accept"], "X-RateLimit-Limit": ["60"], "Cache-Control": ["public, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-RateLimit-Reset": ["1531519836"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py"}, "recorded_at": "2018-07-13T21:45:17"}, {"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], 
"User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"]}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/git/trees/52a3f30e05cf434285e775979f01f1a8355049a7?recursive=1"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+WdW28dyZWl/4ueu0sZt4wIv3ncnnYBY7tRZT8MBgMiIjJCYpsi1eRhXdzwf+8vDlnMLOmcw51SPDTQNuAqiRa2VsZtX9Ze+z/fPLxPb37zxulkmpnq5Eqzxurgqvcu+tgm1VQKxrnJxuTf/NObx/sb/sD7w+Hjw2/evk0fr795d314/5i/KXcf3t7Xj3cPbx+u331IP1zfPz5o+/bpp+abjz/3f317uK/14e0Oe/0PvPnN//vPNx/T4T2Wu7nrd7d395W/zIe7hR++UdM0W8uvDz9/7L/ON3eZXz1hy2GOeS5+0qq0JYaqS9LG+SXXlGyeVcxL02Hqf+D67/xxpe1Xw+x/g4e3YtP/+KcV3+Geb/fwzc8fboQAky9LjDl6U5YlKhWUbTo77+cwlZC0NstSfHIvAMOsBgEUm94A/O1f//KHP3/3/Tf3DwchQB9iYf9NkXXOuU5z9EXrlufZ21mXFrR2LbsVoFJeD0Iotr1B+Ls//+kv3337v/76l2//9K87YE7BVJtztmbxNpuallhCbCrM2s7BqKJ0Kd7ML+toJm0GwRTb3sD8w7ff/+XP3/3fHQiDMcEv07JMlTNZszLO1qSicblZ69S8zDGpqa1H0Wkz6jCKjW8g/p9vf/f7P33/e+E+rVXVPLngZ1Wr8YuzzcfF5LlVFtY6E/lRiGqFx1U7aAHFtjfo/vjbP337v3///V++ub4VIlROaaeijZ4bZVYtq+iMm5vj+lXelpm9GmPV6xZVo64asektwPS32q5vpC9FqmzFqdjoaszNt1lNSwq26LDwGBrTYnUpHF/Bp5fChHnQ8olNb9B99/vf/ssff7/j+GmT8rTMoWY96RB5GWLgHfQmFduSSppX1CWz2Z/Rh0EAxbY3AJf6wz/f1/94vL6vH+rt4eGbw0/SJ2MGqdHWqGRDm2yerJ9jTTbZ2fOPVqaUta+bm8a6QUjFprdI78rDegR56vgPv352Z44u0C/ujJvmuTY3+XnhHjVLKlHNTi+lTbPiVYwcQT23jmyMqya19wmct3/cd/h4AWJ2VuvqTaw+8TzUOKeocyuRN5+r1Wul1kfeujhqc4ptf4qxu79yP8ZMMeoy57DYKSSjo15cK7yEuSw5RnzROTo9hZf7RUc16vUT2/4M4uPh/cMOkFr3O2SqPlpdswlt4eFjv05l5n0wKdq52rCkFaSKX71Vn9xtselPMZa720aAsp7AiwHFpHE6iR6CLywZd2qNk7HJR+UbIdScVHbNuvVu8WHYOyG2/SnEpZa7+3S4u9+zlq1NQZXk1VwWX1Jb8jI3E61iiZuL0aTkiTXW516HUf6M2PSnQOsPx6dCfioL0cQcZ52a0p6nwfhqOIWpWj0RMwK9lmTL8QZ+jg+1H7RjxbY/Q/lT+vDxpkofDSJepdIU0xRccokbpxk1VbwdtdRpKXPBmTO5vzlDHg2xvXO4yBU8HPYsojKe12+xaslEGJpbtOrZOeINPxutop6XPOeeI3heRO7arwb7dO0Uqe0LYPdg9cH6Elk5l3y1ThPOc90oginjOJnLkptWZvNSKqumQVjFti9h7dki+fnU2nvyN0pNxieftZncZJpOs55TarqlrKrPZuMYqDjqfIptn4V7/fDwWHegnXFZ1TzVhBtke94mpkbWypHJ
maeSvcGrLamsPoIiBTJoccW2z6M91N2PzML5icpNQYc0FVPY15PhEi6eLE7EB8wqlKrKenR9HOWri22fRXxz9+7d9e27HStczFIXrt3g0+S4dKNNs/J1Mvh+cYlTnGzxS1ijTKXGXVVS22fx3iX8wR1oW1AqFzIF3L6tLOSZMwHovFRlTDTLZEOd4qLMxue1o06v2PZ5tOVwV9Key9nYiccvxWC1XYwKBGquLpNLlveIPGbKKTZfjunpozfBVT0qiSC2fRbvw93j/dHTeU6sX4xES8g6k+DiOfIZx4Ic7ZRbbWQsdcwhkJFdSAr1fOUYp0Jq7xV4b/P93Y8P9V7u8ceelsTdJbQuU39q9WwTSSJPrWRShTR71JXwdbOJR4WmYtOvgX6+qK7qT3Lck9IkTnwkPdRDcEK4HIr2s67RTtUarYsKtq64ox6V7hObfg333dMRvnpIP8uBL9pHE9MSMu7FrELlgVJlCW1OFleyEOaZQLj3suDGj/KwxKZfA/7jffp4dag/HeSwVVXN12UxVAFTWQqXNYkyUjDeFUWqfuaok+rdlMrmUftcbPos7MOPd1ctsdr3VzvfKNzKJQJbxVJzKYZfzkvQBLekuWMgdHCWOPdXEeAon0ts+1PcPUDaE+b2resMCSeTLTnCudmgSaglpYkXWuu1w0RudONY4oV99c39FCGJbX8Ocs/b6zMFCWMqt3GYlKpGERbEZrivbAhRJztRZmqrZ8VvDkIoNn0C4b5wiGtYLV6TzfYqLhQmlqyVxbdyLbpGdaJOWeOAvFxNwY7yL8SmPwV5fbvw7MhDvtIyKcOG4xQVIV/JdrbGE/jN1JkojdpAvoJK4gtGF+yoh0ds+zOQPdLbcyKtXoIjiZ0KB9DFydXG+0oyKqXQUxcOxkWBifGCMg7brmLTn4LsfuHNHpAtQIIgvqPuguerlkjZgrc9s7y+F7MnT3hry/qkcDkNOpNi05+CvL07XLdr/P7ru9s9WD1Xqy/T0uZCjc30q8j64EmFdz+COAhuRtRtzVSQZxyEVWz6U6x39+/2QAzUL3D02ZilVEhBwOKUNgqkXElTm7zKs13iejLNuPq91PSnED8+3uzasmqxc9EL6TU1e5ISdapu1hkW1Ky96q8k6cUc12XkoA5aRrHpTzEeiVc7bljPvYofSzUmk2xQy2Ib2US8vOZImxoKYpWfbpgYhu08CKTY9qcgH2q6L++vHg73j2WX75MscZsxCS996alh0t6RugX7mAAntEod3EOgWpk1dhhtQWz6M7C7UYKCEhsFVB0LSRY89ppwgyLHksJjLmQjdNF5Le9T9Bi0pGLTn6I81IfDvvSZLtVOc7EhGgrCbilGw96rgYJNSKRbNOyUwuvy8mqqyflR+UKx8U9xPpJv2HU+81ScJ/FLJZXo05Wge8oQZlhM/Fep1qDbbJLecP0GrSW5HJnpDcZnkqcwcYTbPfup4BPU0Ii5HAtGpb8Xjb0Dp+ZyNfVYBx+SOBLb+xzR26ur69vrw9WVPIjm+ccbJ6vn7ASZhhCyJWJI3TyFCxJkMPuoE7u1AO6G+a5i0yeAdiqDuAQOUYOSTEuNZKfVySly9bNPvBxeLxo3KGlncHjWhJi2w94PsfFTKI9sBjHOeWLdKHVT/+UGJQ9UXVuWEGBvwJZqurqQYA9vGGFqmEsntn0C5qbaL8YK09tQbFlyMnBRyPrx/OMSGBVrKI1LB8KU/jVWO8pVF9s+gfW54C/GGagQu1wqCbxGaThB14jOFs4OxUWfdF7CTEJ7fSbhoI66W8W2T+A8JnyEN2yjegbjFF+AMjexJK66qin6ZhayWQvMHE9w2TpxY8gNK7Z3DtcX3LPGQ0nRlaA5WgchvJrk8oJzCdSi25GCq1vdEI3iKA9WbPosXJolOqVRfhMRTlJQgY9D+hlaWJpyX0JTa386vbcL6dpeOt4kC0aV0cSmz6LtHFw51JLhp3S+piUjgvcaUnKtBhezLQ7aFX0b3oSyLqzi
q3z1Rn7mckhtn8V6JK6IL6NMpcwEG8iChDRXGIGEJbAbqKJB0YVsZReIgpscUMCrGIRVbPss1vdgvbvfUVfhVM5W50AqOgeI5DlRAF7wZR1dDlOvo/ENtq0qNLOM2sZi2yfh7lhTnyoJH5sqt1OaG/F0jJBUfJwK9W4KSeT3+OFa8u49AYPWVGz7NMieihbvXVN4T6xuU1NLSDwtNOZwaOl5MNzFczYkakmfrJnoGR7WMD6r1PgJoEdOjvQpnecEpzou5NkTt23qrXCLLmSfFzpKUsBpChT4e8ZryFMqtncW2Be8pXCMCtQuRXWkb01KQ1llyu0E2ZZkLUyVGuEubConwxhlYtPn8e5+TFkyE8niQSijVETyoFHjtCnNi6ZdwJRpJtBWeZN5d6NqKGLT5+EeHV75Ma20eNS5gZfzl2daP+i3ajj41moymoEgJjv6ItdYbRrGrxLbPo/2iUAnvpSoazae0O5DuNAy6T5ovYsiS0QBqfe8kLqu07TtFYRY+NVn98l7EBs/D/cGOvmNfHG9JoKZk6G80sm9lM1a5ChXGBox0s1EmoxWirblvQ5rdBHbPo/2A44hLsTtDu+QHmVymrbZDFBXciAS78xeXltNrsxRZKEJJm2oGbMbFaaKbZ9A/Fw5E29ktu4CJd0QjyZWsFdDHctbJu6qSpEFliBdFJsqRCfUjXpdxcZPAP119UyMlxRApOqgEyeXQztP0dIUQ9ah0TTpnPM4FYbn6eWacrS/Djq3Ytsn4B4LaGKUFJNsopO3LYHwlGWlp5ciN7jIeqbCPXXs9N3QIenJG+Ucio2fgPlURBPjtGDAq6cNfZ5Y0gk6DZGc6R1Bs7KGbG8IEJw3MRx07lFvrNj4CZzHQpowy2Jo1o4TjwsM9OMrQ+K6N8HiOTm6oDKZ3zkkNUw1QWzvHK4v8AwrOd24pAnvMGWuopKIwxV9lIbriMzgPMNghp3/cizhSQ46lWLTZ+Hm+3Rb3stPZwyNdicOYloy3i7hDUwM/AX6C+gECCSAYaPQ0rYmWaZRYMWmz4Ld7QVHPUFpzZb2UShgUBaoM6lUYQBOPkHYLtVD8tx4wVCVR72kYtsX4V7vcIPRiDCKEiH53pkoPFNtMvPSkD2JpEYd76siJTqv0SqN7qOeU7HtS2g/pvvrh7tb+W6GqgvPx2lHnNqTZ3D/afKe7bwY9FJ6ODtn6oqbBCkVqkFnV2z7AuLbw7GTT/zm4C1RhjJuIsiB6oesCwjNBKEVNgfVOMPhxmlaG0ncHEYll8S2z+Jd6sebu5/3JYXJK5VMAyrRHLEczW7UMgxdqbFTH2n3o3jVaBha93QPBwatsNj2WcTv7+7+tmN1g4Y3Znlx/ES6FFENujPZ3Sx4LlbPykzJE/G93M14i6P8Q+q3MttnsX5M7+AGirdyZ9vorsgAzOa5kGkbgYBPNQcRIwgB9P2TP92SHOhDGLSwYttnwd7Xm5oedsR0+IQuOrCSJKUhyNEf3/D44QOwppnKeQjEO37NxwSCnVFwpbYvwP14J19aZA4mnHxnbPerIECWTkgis+gL6agU0XSY4UmuqTY4EdOoQys2fhbswyHtuZMz9VV4VzaSJEWUisQEJKWw0PCGZBUJGnqRS4MQsbqPdBgMWlmx7YtgH/ccW3YsyQe288yumg3UgQIpGxIhPXxcXgsdRB7XZ4N2GCuis2dEts+iPaR38m0cKK1SyqFAx9Xr8I51hjjga3Y0zvugEOuqNsVNz8TkRrlTYtsnoD5xCIUBHu0gqBwlkhK9lNMdCyqS1DWsJmwHsUsF9YNj8DMk9y+2dxbYF0R4sALokadfTyc6J1A80gtVJFxExI4qvwoQtqEMrBGedqOuI7Hp83gLhHT5rkXgiZZEWEp4SXOmfQJdvAmpPNxEnH/6VKdpoY61uY5o3/zqxX1KDottnwe7MxdODb0ikUensSETPuMp6VwhgXqLRAK1up4yLiWtAcA8LBMuNn0e7DETc72vvoycFSJy
JGFsThQlI3ztOdJfoEiv+c7VIj2cyF68XMBcYYOWV2z6POJOEpXvZQrMtI0o9msgG95XspJ1gu8z8Q6gxAbJmXigrqRmdKEGYRWbPon14YG+CjlOWmDUQjrYKzjpvYm60YZLNpxoh/pGzrAjUHBJa2IYD2LUBSW2fQroM4Fb7PTTdEm+n9rcgubMpBJCABVpT8hoiO9QAIBE6ogGVu+B5PEoL1hs+wTQJ26zGGYLXZSU3GFDssTTI09ABwEPL7cLs9F4i5hnby1fnSQSMaM4L2Ljp3AerukjEeN0joxpI6FPkwEnFCcCZiwA+S3aEExGFMAtiAquOGMcBVNsewPz+kOPUYWekSFGgxzQ29+RYoHxS9ExmoIKJCBQDczUOGho62XWIZ6R2N5ngN6+e2/+meZ3QrTbdyu6iwpfNP/0W5XNmRCBjLXqhUoqiRS4AsQwbF2H+2c21QtAj0qNio1voPa38v5wddI/8Edh3FPKyHQyRXoIYLN0LSgEkvEVUDSAAkGsXaGHLqUgu7fZoXgTX72gT96Q2PYG5UM9PH78pjTpMjo6ReI0KVgeuRYHC4J0AgJJyk9kuJGWoRkagchNsW3UAym2/Bm6T66XS4u38C7AEjRpIvaiXWviDDYI2vQ6kcFH8Jpb9qi+8Sw0Mq4NBqKMzPYGXu/0kd4uJaIgSNkwT9qmBU0CJC95DBsNP1w6M5EYlBVcu1G3i9jep3i+INxC+XghC4LfQtsdbc2286wick/eoTcBjZnmZkpua7g16k0QG/4MZEkPHD0WUPg4FGq9XJA8BtBSaFenEY0SWvTEmOhZO3xwbhx+Omz5pPbOI3v72w7xarn78fbmLi3f/DuVFuFjgZAeR80iVUV9gWJLg+9pYqE7hmXUOHJ0b/GTTSakqySPei3E1sXYr358X2+PEhqUX3oXdN31ObpkYiYnNEFzQHeOxnbIZ6Ti4D3MXRqatEkjN7Z2CBOhkQod9KyIrb/6OepCdXHHNkCQD5EcGmaoHc+GzraZhIqG6UvpGNFsnhp8PT+tTKye9h3FeBAbvwD7X14KUFflvrLsNNqmA+nPHR+BBmkcw66XzewIeEq01S70EkFMQk8bNR07QTusYS1IIek/6huIbcu+wRN4ijY74ONBIX6F9hU5fXqMAg5GoSRlmaKBkgOqzFzvqLytoWqA5TRs5wttX4D/r/D5rzqP4Joc/w7cuIkeoQMI4Qg8UGYm2611D/Ho3OgXIcQJmxEaffFFkC4ZlVYT234Nd7u7/9su1PCXMl1WJPvBSrERUTvbmf3LNAUiIVTHA9s9rvVm8hKj7jmx7YuoD394zL9jksoVoo315pplv0JqPdHxiqLhjvWnsYFyJToteJ7zTHyrmMCBjCMNhOhlMQYBWip1rrU2iyLpqPUX2371S3z7LF7JN8AXeLiqvSCy4yto+jzgVqdAky+i1yhG0YOP4GFXY/eQxIiN+Qgb+Q/SH2FU1VZs/NXP8H09JuiuPtHTenYGeupuxzdZqJbAoeq5DkIUvIJCGZCOn66q0fUnoGJzcWxeRXIjo1wjse1XP8kvDyLaN5V38W/15z3fwM0RjQJVNexdguuereWO8Mj8kKNOgfQC8hT0ILzcjl1+etCrILYt/gbH/rYdO4BWCujYiKfwHJCihrntFo+4P3wkBx3HIA6EGP7mlqQ+Pgq92LYY/VOGZQd8Ci0FntUMs9kxGYZaIgxRT6TXRxlN+MOV5xINzZfF75IzgxZfbPt1+PXD3b8zlGoHcFjcvIuxeKQCaKDWNiL+WktGga5rW1FDhac02U1ySdM9Ngi52PjryJnds+wDXgvPPqtI77FFe4RcCQ0ZJXPmIT4UgnvaFOhVfllxfKdRPjBHTGb7ddx7zzmEbzRKYK4ornokv1D2guhCp2cjhZppuWdWWJvbJmdK89Eo51dsXIL7ebwc2plI7OMY7dr1qOsHxltNk2UODBqxph612fvIOcuxbwhjeHLLm8VXo5KOUWr7Sz7C
riPA4AQykyhn0pNO+wajFfCAaLlDlojkT64MqOmkpk1uchrWTyi2/fpX2H3Zk5mEx10IeBm34tAPJTlbLc1okNmQzSXRhxtY3JoDoOA+ygEU2xbgxhG+Sjc3OMJdVWzHpQ+BDVGfrhqtuAGSnjOMXC5B9j0leK57cvF9ON/L9rf0voz6AmLjO77AU21zxxfAt4Fl3vPv1PZ47elrpwmSuTC5l6oZJYaAAQ1qm4Y8RBBHfQGxceEXeJZW2QGflG9mDg7zm/rgNNpEeI7gTaHEwQzOTFGJln/k/H8l5j9MvEFsXAi/3d3c3P3Yhbt2fAFEOuiURhClwtplQFDwTJGjl43xHCFMNCFwJZIWWYtOiiLqKL9HbFz4Bfr25x1MH3Z9AsSTSf9aqmwZjf9pggJKwqfT8S2jAhFPpxmX0vd6C3AiRnkCYtuvf4EPJAD2rLxDsGTBtUWCLxLWEe4T7vZqB0EfqpH0hjEiytbN3qcJfZDDK7b9OuxbYv/6E1EeEf/Vy7DdL/KGmIxlkMpEID92YW12BZpZmqxZdZXiANcgmnZUY1dvyA5joIltv/5FniXk9+wFqq99jgdlZtvnl9BL1rWILPpSTECDlcWrYJDV2gytGTfLVWxbgPz+Xbq9/vtRCXcPfCogDAxwpEaDR5kxMAMDn4hNYHgNIALjBChKvJujQK580FEQ234dfm96xQn6D9RCduU8mBSgGdmDSFFT1KwnioMZWinpLrpDUUDO3ATIN60lQT8PG3oitv06/J4VvjqmhfesPTQZanwcdtQBmSKA1A+tHb0jln/HGdT0N+AYTJtAcFwyWGxbAH6ln+54/RtiIoU4hzEJFVcIApg/joBZ6N3nCkgJIQomBK7geR5HbXyx7dfBPyvtHsnVO9BHBDCo+fdBlpT3YdrGPtgyk/3EKbTa2V4dV5t+NMPmGHTsxbZ3ob/6keH2xykaVx/SgV7iHV8DfTpk6lEyYz4kLFVyXYERE5l55dQDly5dASO5bkQymQo6ai+IbYu/xpNo0i789IijPqLdzFBsvgHRD0PN4SQvtBsgsX0kpMDcXV//CWmAUbEQs3lk1sVf4IWQfr2vKozUNnSARArAIrTjEfGDAIMjTL5TLxVf2FraDzbuMP7iMKlCsfEv+gxfejIabX7Q1al8TAxHjxE5E25MuN3QRPpRgSFMxXgNki0UkVFZcS81Lv4iu5MEkHYYc1C4FRgpFGkDpC+P2BhBqh400sHcJYThzKwngyTaoHtSbHsf/K+5KCkN4S6jWF5gDCPE3sNFEuVQJ/gtLkX+gbe8GWiJDzEqXhTbfv1zHNs3drwPxEF4BdpR+6M7nbRwaoHpnajJoa1Bvtgxpp3xQ+suoGI26hCIbb8O++91V2jgqf/1yggKXbyNaE7Ar47a9rbIhS7rgipkM9OyiQkZXTto74ttX0D9bRcPvLruKdL96THlAllx1hxBhomt1wWX6eAnEI7Mq6efEJ0GxgOuFx8D1Ua5BGLbMvBHobJdaSGIQJ0F1aDF8AZGb9QCLxKimKXHu6BNZ2aP0swKHvrMqFtPbPsC+D/+Ilb2tPr7PwD5P/hRxyHhqIrDDUSwjHQpg7dqREMTkpyhLW/bDE0mfVSHj9j4hS/wb8TD3z2Fw79QIu7rD9f1xyNprKsS7rj+EGyuaF9RF2ZEDkOs+Ap9wCCZUh57z4kIqBxQM1wfQc3wp0E3gdi48GvgG97sI4XA/cEVRJWEJgt6oOhx4q6f2Q+IDqGc1XkhsGQ2NFlnhrlANMDKjF9A/92TyMPVEydmz8LTEcVUX6SWAkny2DWdaWJ0EGBgTvuuwo7OFVPqVrYY98CoKFFsW4B8L0M49/wfzl5XweUJ6JoebHs0eagQUSKojHuGL103aXFo1IP2u9i2APfx7Tu2BOy6/skBoirLUMlCuEMajPQvk+dpWDGBgJgsme5uwGYyLk7gqOtfbFsA/xGdnrQ8fYA9276PeYP7wSdA4xuNWfKijOpxDGvv
A8M6dWI6ji5c7zte7FHEELHxix/gl4bsf+udgledGwFf+Chtc5Ufr2929Qk4a2NDyd4jOAYJjLCPvlA6z6gdda4kXBk4YxSRXj4HTJpRrpDY9o6v8aTws+P98xS9SI4g01TIDGdoMyihaLS6KkOpfM+jJXT1NkEwWkejykVi23s/wNM+2HUv0GBJkxeqy1wIKTNOmAQx13QfydXlqGkQRtWVbvaXjYDeyCinSGxb9B1WmuiLlNeO/QBfBtEnTgFjglDdQ67ZLxrk0AQZb5H6QMxGsLgWjudxs/XEtnd9B/hTh5+ver/nnnuywpxrUx/M0oWrTYE2AaU88EEI38gRMfRLMct10wZPX/ygZ1Jse9d3+EUPa8dmMF1aBp6koVBEbMgU9f5vXToVyrAjb8oD0gWf10Mxjjctti37CE+J42MzUe2soj17AepwhcsJgw4WUVfgNPBoAxmzFnEZu64Hgxnx5dc3k8lho3xFsXHhd+hu06ryt+uOLPjGTAOxVA0U6SEEeighQC5A5gMNsT6UcqG2cFSafWr2JZwYlSoS297xGW7S7bvHoyTejjPRieO4Sb2tCnkpaDV0PONAwRqmp6Bzq+k1a3UjqEC4OOrBFNve8RGer4Vd3wCVYeoaeM2VOTh0mRfbhS3hGHBX8kKQRqaZouiNxiXsokGXo9i27Bus2u879gD9cgx6zKwsap6zRdbI0zBtFlpMo6Hjpgsmonm0we+GOU1i2zL8W3LBwxUDdevHw9UDMg57zgQNBQ1+JSVXWingFlWIZ6iI8WbSamTgoZFSYsrr+j3mEEY5T2Lbsu/xBa8kahwMiGI0H122MO0axVQkD3o6nRuBFuwUlCvEVeu1OG60hdj2RfQ9efa7p9xZZ9vuyyExgRCZFRptKanjE9BOghwLhQMFBzXgSzeKZqQZNlxTZ4aJrIuNX8D/104vPKYTjir6O+6BvCwI6DCPsA89wytgZAvaJYEuE+ZP9jZsRVYJYu3Lyvf6yqB7UGz7c+R3t63HzHIRodAJ48oy/4ypWTTOUz5A6aNfdEwPYDAEZMMGv3R9+JF3HgRTbPozmO36p8MjTZRymDTPovHRIEjBFYDJkeDJIUKDSCrMWYfPxzev9BJtDvIomGLTn8G8RqD6HRwpuHH8vfp8jze/edO571O/cJ4Fdw739Ujzep/4YSHS55pGECMxQjRAh4f/EPsc9WJBCTu6TEyJHzaWUWzvErb/GRonm8V8+77efNyjxBcm/I8arEPJzCE53lM4EY5LSch8JVLdVHc4qb/SURrlmIptX1ziYyJv1wTgiekWtDkHShi9vQMaL6rytDsUpMACM7XwQvi/bNq9yGyMyuaKbb+O+ThUVX5P6cqkDxjNikAcjYuOmzgdzTNETRzORm92jrqt8Si9rYOuY7FpCejDrkF/ONedvcQcPKp1FC5I2MFXBaptuOA8vQ2yJ3IPaxRuIDgNwi02LsVNrfJ+h2QsSUj4mvRuMBxiyTmgA6wQuiP4oHRD7npx6FMRh7+AZwD9IOhi069DP6kMB0lzfah6mm59qBonmDQccwDp1F1gcDvHJUYFB1U40g5kZchSb4dPqWH68/AjZbaFqB+uvmDKGIm35ZiYh5PFrHY6tpkPCF+f0UJAR7kH4TWFjtfqkAzr4RWbfh3/zqFNKOVBP0HxOqPzx0A1i+eJAF0X22cOq6rMeG9orm6bt4ZlmcS2X0d95GluMm3y651hu7QnsftgadJMS0zRoOdAXJ/h5MFXJNXC057WvlWFysegwy62Lf0AO0dOTERSS29ahbRNtyaKLdxzuY99Mn0KW9B4M9z4m7Z1F4c96FLbUuy7J1AwN/CJg0QJ5igkNffIEsF+Eu6MMaOPF/GupayZBHp8R6282LYc/Z6BFCSPIWFAuiSbSgqZ9qReiu9Ne6g2IGEW4KLOkPjXqw4q/6BNL7b9OvSHJ9ka+WHvPaqQERYCTKTayJPAvdQIFjAU1TqXDD5NQbdq06QzbB6U
2LQA9m6VabjWjZZE9F1pxe1C2mjXss8JvulOC11ku4t4rewb/PtBq12kpl+HvVNzmjQ5DMPel5JhWJArxa+hLdlmmNakT6Cbo4/PpO6XPW6GTYYSm/4M9K/TYRcTC11orXGIUbjV+KokEiDTLLXMbcFNUdxf1ExovwLfEDlmsb2ToN6m+/L++od+sp6zJhfVmPtkZlhijONj2iJ+AkNUDI2jE1p6S19JyOMZVZ1NeD0s0ye1fAZm50gJQQanVXOZwWw8w8wv7t2xLCDz+TicofLLhXXdjKfDTxlV2BHbPgMTqdC7++cuUTHcxpwGJv9QxbKtD6WL8DwSOdwFIggjGryp3MKrt4Fu6Ffv3SflaVIWMtOn0T7HSZKNi7oHEvC0PM5IfyF5heARYsbMse2vC1EFEymo6oc1P88YnUEgxabPgDwOzhSuJQwFy/wAz7zejKx36UInjCs7JoSKodsT2QMGcazpAVhOo3CKbZ/G2Rscr9qxw/e2/CzEqxm+5ph0BDmtEf3DVUbfLlOFgbmsIC4jwkCr44agokc9oGLL59B2ZU8hyrmrWDJhG68Qnl7r/RkB3TYm+CLdZfAHG2ERt9XLk4mkzyjmhdj2JZyUVA/XP1wfpMtK7jagWo2eJ6EfRbXUUDJFsTolNIwtlAskTnVsGx7aqABAbPki3GcKvnB5e3GQRgzmMM8T45lJcNCXAAsVlxclAtglNHH3aeovy8tszVGpTLHts3ifZ6EKsWr6DElTg5bRBgzG7Fo0TG2mE4WzyhAShyAXCmUbB8JCNht0E4uNnwP7NAZVCLVzKH0mHUkXGeKinVYLIyTScgxhBuoUA0KXhednXVaYM4OQim2fR3p/nR+Z+HQki5GZvy4byfmLPqLrQ3yZGkNRjVFBSM46Gm0CFYoJ2b2ML8Eo+T6x7gV3HwE2CLfY9hncT2Lb5e640MJ1bujqUjKlBkOKlslllXl7CxVEFWfyk7VPEUJpIW8GmYVhPBix7dN4u8DwjXRdGf3Ay0OvLEKmytMejH5SdUhpawZlUhLBZ+ZHm4Ftw8a1iS2fQfmDfDEhbaWZUYrwGeE60u3S1YISaVhS4alC7OsTm5e23k8whQftXbHp0yh7+Uy4YxlMQvqcwBFFsGnWqKD3YTokWbicaQODzEojGKOv1puJJulREY7Y+HmYv7S4CeGGgPALc8ka5VDFeYSo2afN9LEmEXozg18XpPC3cAnxBi2q2PYFtO9Z2bt7qe8k/r6/8FPVf6fF/UXzXri0SBnBx8ZNnOBi0rBRmPpBI2ePYSOko0ojs9XUTtadzAiUQUsrtn16afvIbSHKPgAU9cLIM0rLahespTcfBwqfwhEOdG0rJlKbNT2KjvMgkGLTp0Eea51ClGjz6JmW7NI7TBgPCc0o9dCVucQotyHNwcwK5tyueQi8i1FrKbZ9AeYV4acQ6gwrlN5yOgEp3JBRIzvonaITs9BOwXxMBLkI1e2mxEHNd9CKim1fgrovwEFv6zjVrMdsXY8RvS2IsNzBLi+WFiKUR2gpyBuNDdJNg+CKbV+Ci/9Q7z/eXz9I9zLCYuxW6MC8N8zfy+hu0WCPDh61HP6nUudg1u2GK8iQ1lEsDbHti4h3+Ey0RCEuSe8DEpvcUb0LBFcxIzsLPYFCbUN+ljLH5noiuh+0vGLbp8EyPkB4ZGkLRDKO2WA0R86Md2HOCAeYPiBGTTWLWAhR7a9mvNEXMQij2PRpjEdxBCnKLg5NeZ1hxcwFm7sWjGIQapfF4Ix2FTGE1HhzNu/pqJCV3haZ6XMon9pYhECXAmGM0Whzpa+Tmjp1NvhyyMQxX7uSbKLdd4Y9tukAHoVTbPkMzvoulZ+vjpGbECtU/Joh0ECI7KluhE6mivijYQgYM4l7nokoDsb2y6LiMg3aumLTF8Hu8SKoGHPzWuh/M8QJnCRo90g9M/IEz78L/pvSJTDX2AYO3SCwYtMXwXbKjHBh4dvXIwGMyQ4eAbPeYRCoxkEQnBjw
R+c2bYk0H7wsLNIWo7BKTV/E2kvHQqycVbgODNdEqSPTcwcTEBn7QgdeYt5TQ+aaIRY5r4QoBB4GYRWbPo31Q71/J/UbYP3wnEBEz+xWSqkZaq/KDDHCNaaigYrnzNQOtYlnhmkSiU2fQ3no/pukFsf0ZRpkAuMYGiQnYhn+NzKkDClS6I24wrAfiNXXbYvYzKClFJs+A/IX2SEhUsasILeOAivMRfz5IzUZ2montc3VQ99EW4GO6U2iBbnBQVDFtk9Dvb07XLc+cvJXLRUXE7/wApiWhGTM3HsAKZ53/ZwZxWXuJ+ZMMWnFw/Nxm6gGhYVBaMW2T6Ol50m4pEwOh3NKvRipEIdAHHU5pixAfDAEVhHHltPJVIWthzSMjiq2fRpkp6AKUTLZNyykTjS3AppvfU4WRxatPyRSYNuSLiOl5jYVR5x9MypnJjZ+Huaxwi25ixiBUiK9zDOuYCysH8xij9YBE6qrhmsaaS+oFJhfnlAoiHHUERUbvwR0ZzK0oOBAHM6IaiaCkAD11N4gmJIAhhfBAEeSS3NgfNAKmOBm1CGV2j6P92p3KA6jBg0XmjHRwe6i59y3zLjFOUzQDfkUOMQUWTeUCKbWjKTzyIxfQNx4dKS7GU0Cxvsh7I8LSB8QqnadPjzPgVgOXSvmfRzbUl8Wl+h80NpmqekLSJ/l3YVgDYELEy17y09PMEwUWrPBJUTKCloR5WWCHXLAG9k2mlJHbWWx8dNwX4Z8CrFSMaWPGtlmEoMhKiJwbidiOrSkLLcIm3mqrO26sMPKNWLLZ4DWtHyQbt+CuAxMO7RXPfxnU0kOMrgZVVJ0iOGIMsDCMlhrWt9WM3B2rdT2OZz9byV5cRB/tFA9HIpS1FPpaWASN/rspFugZhHMEbbh/m40llAJHnRGxabPYqST+P3V/p5iUry0gdO5Ac8FJXb0APD9UVc6hm1weVqhjsOs1s0rq0ZFb2Lb50Df1CTO+yKIsnAmIYnOpdXOA8Zv8p4FnmgvVokruPRM6XoB4xQPWl2x7XNAxXkHhgvyqnlGCNA7vSAV7uhA9NzCDT0cJAV1ImlI0XVFqYYF42Lb51Hu9JjEFKmXgup/G3rWrxVhpRcUr0qEVAnLjAeTagL5bTSeFo84oEYXEFcCNY+wpgtpTBqVHA1S26dX92lQvBAnE3EcF5GbkTWzNQYqNhCVUMWElYaKBYk12v9c3Q5HGTYOVmz7DM7H/FDurz/uCM5bd5YqtVV0m6COoueLOhf6RATOzMhiImqtUJj0GpzTezPoYhKbPg2W0d/CFYUf6mkcgu8L5xp9lQLfDB0AnAR0rfu8a0TcqCevjDvmgA7CKDZ9BiNz3YQgIejAlIQ5yLhictzMNlrweGkGLkyxgK2PGhPZ4I0+BwMtBoEUmz4H8mmIqxAorY6pTOjyNo0caxfkpckbij7hOc0mM7L88KEJ219eGeaVDwIqNn0ZqJRZR6tQgQTbpwwgsznR5QczC0l+gCK6ggfNwCbKqCtrPwzLtYhNn0H6JKki8Xsnpm2mPhSZZvVloqEb8XlE2C19PdxBgVAVGYe6GUjEgNJRiRax7dMod5QpqK7Rpd/r4LAbepV/gqrDXFVu3YWpYxSMO/e3bNIrPAWDdq3Y9hmUhxaudkCF+Uh+kH491DeYFUHLG2lRyK6G8ikjp0kTTohkbhr5+MUoP0Fs+zTUH2v9283PRz/wujPOHsVkXz3DASCKaYuquVeKaQxTil58aB4LIleI4KUZCe2XS8kOi97Epj/DvF9SBo0R2GV9KmJP9VYGJTZSLPSfz2ikU0heWtEMUF8pHVD6Rx1Wse0zOGmB2yEjQzIF6hzRJy1uHFsIv5byN9xt5PrMwu+jsgBDa1NxY4zioAMrtn0a6fPMk49SCgsBqZoompITg/DAE9PJWIqmiAnRJEtdkQkXkw4rVEOcNwiq2PZpqDu1gfpER4QYWTiUExbEaqNmvgUFGkoXyAngHXZZQr+RaqNLbBBSse1z
SHdI0mWCFwSPkOZmlCFNF7nrgvDMNqT800y5iCPKd9gI4oRhUgli22dx7hI+grgBH2fyKHAzyZCUma1wQueZ6ilUneOAOSQX27qkbHM96koSGz+N9chfuUrPvbn16u4YuKWbq4dy93GH/hHyRkThiEnm40RL8kkJ7jPcFsbMUoikjs5cV3TbXx4fZO1HORdi2xc+wY5rmdoq2rI8p4wiI3U4hcpACh4gRjXBTkPuNDDVkrneK1QivlHkFrHx01i7N8w4JvG93BMtRKc4TkynpfNv8kohVkbnOQKMJCN4iHMfyPCCFeXlUZGO2PZpqFuWwA7EbFdK5nSG4SzToYwnYXqVCp4L6kakhtnYmlEDm0Q/79ag+1ls+zTiozqqeGkt80UUCgJohTJOAcUEWhUYPNP1e2ZonZRh+8ztvNnGCkmFQUjFxk8j3alYxXxxxEENnuHSCVqIskH4mB39yYGOON+HSlJx3Y7WDH7UgRXbPo30qFIlX1QqChF2C0VkpmhXUqSQ7Cx0bt2HCFrXuWmx4dutMQDj5kdtX7Hx01Af9mrU9BmV+BRwG9mr9CvA23J4VJTRKzx+Olwdk6QRlFwvJ56kQRtYbPs01p3CNPRe04FBRS5xKSEbgP8PO7ZyUBGGSLUwHWdqOq0eBus+CqnY9mdIH2+3MgIXlWm60A7QMhxuIleUmmlQxklakKRhkZE58cw+R1yLpRyiTCO2dxLU/wyt2758u0VueTmYz4i/Q8mYoe5QuZnU1BZohuggTpTQaRLDP96IjVOu+epFfZJsEds+vajHVMQ7Bnk/ZvmFiw4i1yqXrEZriGnNHneBUL1fPrDUOq8SuVe0llZnMJpRDpLY9qt4r3bLohny22SWLMUZmq3Rz6dyThaIslzFL2bt0edmJM360OAdj7qRxLYv4H5Sf9wtAEifWKUghx4aXVPTPKPtSXsGRBBCPMbTIT9EPgrdqZflZq7GKB6p2LYA9h7lP+Yp4TPQjVzRKSJJwegtxpAZSBJOU8aaOt89EN29YEYFd1RuRmz7AubdfgXRO10YjJwjSYHbWGFgEs0S21PCgj2AwhjRD4T/9UwzW2fQHSa2fQHw4wHlBPkVhiA160kA0CdjIF8J1YdaB8kLhbAtyUa8Y6Zj6LUugPDlqD0ttv053H0gjdNMR5igp0V8RQp2gdl5Rz2/SOUOxbQ++pKwdj244xKMYttbkHc/fXN9e81fR1LAIqtGiQpxzt4YxszgLkxqyJqiw0SPMkl/PRuUezb9ycOSL2LT//j/TDu4f7yFt1+XN79p6eah/uO/AAQ1ExlaAgEA", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Last-Modified": ["Fri, 13 Jul 2018 20:43:36 GMT"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["E1EC:7DB5:6324AD0:D9BA611:5B491D6D"], "ETag": ["W/\"a533bd9d3c8208fc3fbb2893cd6f3be8\""], "Date": ["Fri, 13 Jul 2018 21:45:17 GMT"], "X-RateLimit-Remaining": ["47"], 
"Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.050961"], "Vary": ["Accept"], "X-RateLimit-Limit": ["60"], "Cache-Control": ["public, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-RateLimit-Reset": ["1531519836"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/git/trees/52a3f30e05cf434285e775979f01f1a8355049a7?recursive=1"}, "recorded_at": "2018-07-13T21:45:17"}], "recorded_with": "betamax/0.8.1"} \ No newline at end of file diff --git a/tests/integration/test_repos_repo.py b/tests/integration/test_repos_repo.py index dce6785b..8d924b8e 100644 --- a/tests/integration/test_repos_repo.py +++ b/tests/integration/test_repos_repo.py @@ -1280,6 +1280,18 @@ class TestRepository(helper.IntegrationHelper): tree = repository.tree('52a3f30e05cf434285e775979f01f1a8355049a7') assert isinstance(tree, github3.git.Tree) + assert len(tree.tree) == 18 + + def test_tree_recursive(self): + """Test the ability to retrieve a tree recursively.""" + cassette_name = self.cassette_name('tree_recursive') + with self.recorder.use_cassette(cassette_name): + repository = self.gh.repository('sigmavirus24', 'github3.py') + tree = repository.tree( + '52a3f30e05cf434285e775979f01f1a8355049a7', recursive=True) + + assert isinstance(tree, github3.git.Tree) + assert len(tree.tree) == 275 def test_weekly_commit_count(self): """ diff --git a/tests/unit/test_repos_repo.py b/tests/unit/test_repos_repo.py index 1152bfc1..b0e60aca 100644 --- a/tests/unit/test_repos_repo.py +++ b/tests/unit/test_repos_repo.py @@ -1024,7 +1024,8 @@ class TestRepository(helper.UnitHelper): self.instance.tree('fake-sha') 
self.session.get.assert_called_once_with( - url_for('git/trees/fake-sha') + url_for('git/trees/fake-sha'), + params=None ) def test_tree_required_sha(self): @@ -1033,6 +1034,15 @@ class TestRepository(helper.UnitHelper): assert self.session.get.called is False + def test_tree_optional_recursive(self): + """Verify the request for recursively retrieving a tree.""" + self.instance.tree('fake-sha', recursive=True) + + self.session.get.assert_called_once_with( + url_for('git/trees/fake-sha'), + params={'recursive': 1} + ) + def test_str(self): """Verify instance string is formatted correctly.""" owner = self.instance.owner
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist", "betamax", "betamax_matchers" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 betamax==0.8.1 betamax-matchers==0.4.0 certifi==2021.5.30 charset-normalizer==2.0.12 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 -e git+https://github.com/sigmavirus24/github3.py.git@bb40e7e89441f9d2c084e4a26fb4a2a9a0a4c305#egg=github3.py idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 mock==1.0.1 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 requests==2.27.1 requests-toolbelt==1.0.0 six==1.17.0 swebench-matterhorn @ file:///swebench_matterhorn toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 uritemplate==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 zipp==3.6.0
name: github3.py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - betamax==0.8.1 - betamax-matchers==0.4.0 - charset-normalizer==2.0.12 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - mock==1.0.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - requests==2.27.1 - requests-toolbelt==1.0.0 - six==1.17.0 - swebench-matterhorn==0.0.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - uritemplate==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - wheel==0.21.0 - zipp==3.6.0 prefix: /opt/conda/envs/github3.py
[ "tests/integration/test_repos_repo.py::TestRepository::test_tree_recursive", "tests/unit/test_repos_repo.py::TestRepository::test_tree", "tests/unit/test_repos_repo.py::TestRepository::test_tree_optional_recursive" ]
[]
[ "tests/integration/test_repos_repo.py::TestRepository::test_add_collaborator", "tests/integration/test_repos_repo.py::TestRepository::test_archive_a_repository", "tests/integration/test_repos_repo.py::TestRepository::test_create_deployment", "tests/integration/test_repos_repo.py::TestRepository::test_assignees", "tests/integration/test_repos_repo.py::TestRepository::test_delete_subscription", "tests/integration/test_repos_repo.py::TestRepository::test_issue_events", "tests/integration/test_repos_repo.py::TestRepository::test_deployment", "tests/integration/test_repos_repo.py::TestRepository::test_deployments", "tests/integration/test_repos_repo.py::TestRepository::test_blob", "tests/integration/test_repos_repo.py::TestRepository::test_issue_with_multiple_assignees", "tests/integration/test_repos_repo.py::TestRepository::test_branch", "tests/integration/test_repos_repo.py::TestRepository::test_directory_contents", "tests/integration/test_repos_repo.py::TestRepository::test_create_empty_blob", "tests/integration/test_repos_repo.py::TestRepository::test_branches", "tests/integration/test_repos_repo.py::TestRepository::test_directory_contents_for_a_file", "tests/integration/test_repos_repo.py::TestRepository::test_create_file", "tests/integration/test_repos_repo.py::TestRepository::test_code_frequency", "tests/integration/test_repos_repo.py::TestRepository::test_collaborators", "tests/integration/test_repos_repo.py::TestRepository::test_edit", "tests/integration/test_repos_repo.py::TestRepository::test_create_fork", "tests/integration/test_repos_repo.py::TestRepository::test_create_hook", "tests/integration/test_repos_repo.py::TestRepository::test_comments", "tests/integration/test_repos_repo.py::TestRepository::test_create_issue", "tests/integration/test_repos_repo.py::TestRepository::test_commit_activity", "tests/integration/test_repos_repo.py::TestRepository::test_commit_comment", "tests/integration/test_repos_repo.py::TestRepository::test_commits", 
"tests/integration/test_repos_repo.py::TestRepository::test_events", "tests/integration/test_repos_repo.py::TestRepository::test_create_issue_both_assignee_and_assignees", "tests/integration/test_repos_repo.py::TestRepository::test_compare_commits", "tests/integration/test_repos_repo.py::TestRepository::test_file_contents", "tests/integration/test_repos_repo.py::TestRepository::test_create_issue_multiple_assignees", "tests/integration/test_repos_repo.py::TestRepository::test_create_key", "tests/integration/test_repos_repo.py::TestRepository::test_create_label", "tests/integration/test_repos_repo.py::TestRepository::test_issues_accepts_state_all", "tests/integration/test_repos_repo.py::TestRepository::test_create_milestone", "tests/integration/test_repos_repo.py::TestRepository::test_issues_sorts_ascendingly", "tests/integration/test_repos_repo.py::TestRepository::test_key", "tests/integration/test_repos_repo.py::TestRepository::test_create_project", "tests/integration/test_repos_repo.py::TestRepository::test_keys", "tests/integration/test_repos_repo.py::TestRepository::test_label", "tests/integration/test_repos_repo.py::TestRepository::test_create_pull", "tests/integration/test_repos_repo.py::TestRepository::test_labels", "tests/integration/test_repos_repo.py::TestRepository::test_create_pull_from_issue", "tests/integration/test_repos_repo.py::TestRepository::test_create_ref", "tests/integration/test_repos_repo.py::TestRepository::test_languages", "tests/integration/test_repos_repo.py::TestRepository::test_create_release", "tests/integration/test_repos_repo.py::TestRepository::test_contributor_statistics", "tests/integration/test_repos_repo.py::TestRepository::test_forks", "tests/integration/test_repos_repo.py::TestRepository::test_latest_release", "tests/integration/test_repos_repo.py::TestRepository::test_create_status", "tests/integration/test_repos_repo.py::TestRepository::test_license", "tests/integration/test_repos_repo.py::TestRepository::test_create_tag", 
"tests/integration/test_repos_repo.py::TestRepository::test_mark_notifications", "tests/integration/test_repos_repo.py::TestRepository::test_delete", "tests/integration/test_repos_repo.py::TestRepository::test_contributors", "tests/integration/test_repos_repo.py::TestRepository::test_git_commit", "tests/integration/test_repos_repo.py::TestRepository::test_merge", "tests/integration/test_repos_repo.py::TestRepository::test_delete_key", "tests/integration/test_repos_repo.py::TestRepository::test_milestone", "tests/integration/test_repos_repo.py::TestRepository::test_create_blob", "tests/integration/test_repos_repo.py::TestRepository::test_create_branch_ref", "tests/integration/test_repos_repo.py::TestRepository::test_milestones", "tests/integration/test_repos_repo.py::TestRepository::test_create_comment", "tests/integration/test_repos_repo.py::TestRepository::test_subscribers", "tests/integration/test_repos_repo.py::TestRepository::test_hook", "tests/integration/test_repos_repo.py::TestRepository::test_subscription", "tests/integration/test_repos_repo.py::TestRepository::test_hooks", "tests/integration/test_repos_repo.py::TestRepository::test_create_commit", "tests/integration/test_repos_repo.py::TestRepository::test_create_commit_with_empty_committer", "tests/integration/test_repos_repo.py::TestRepository::test_ignore", "tests/integration/test_repos_repo.py::TestRepository::test_protected_branches", "tests/integration/test_repos_repo.py::TestRepository::test_pull_request", "tests/integration/test_repos_repo.py::TestRepository::test_tag", "tests/integration/test_repos_repo.py::TestRepository::test_import_issue", "tests/integration/test_repos_repo.py::TestRepository::test_tags", "tests/integration/test_repos_repo.py::TestRepository::test_import_issue_with_comments", "tests/integration/test_repos_repo.py::TestRepository::test_teams", "tests/integration/test_repos_repo.py::TestRepository::test_topics", 
"tests/integration/test_repos_repo.py::TestRepository::test_imported_issue", "tests/integration/test_repos_repo.py::TestRepository::test_pull_requests", "tests/integration/test_repos_repo.py::TestRepository::test_tree", "tests/integration/test_repos_repo.py::TestRepository::test_pull_requests_accepts_sort_and_direction", "tests/integration/test_repos_repo.py::TestRepository::test_imported_issues", "tests/integration/test_repos_repo.py::TestRepository::test_is_assignee", "tests/integration/test_repos_repo.py::TestRepository::test_is_collaborator", "tests/integration/test_repos_repo.py::TestRepository::test_readme", "tests/integration/test_repos_repo.py::TestRepository::test_issue", "tests/integration/test_repos_repo.py::TestRepository::test_ref", "tests/integration/test_repos_repo.py::TestRepository::test_weekly_commit_count", "tests/integration/test_repos_repo.py::TestRepoComment::test_delete", "tests/integration/test_repos_repo.py::TestContents::test_delete", "tests/integration/test_repos_repo.py::TestRepoComment::test_edit", "tests/integration/test_repos_repo.py::TestRepository::test_network_events", "tests/integration/test_repos_repo.py::TestRepoCommit::test_diff", "tests/integration/test_repos_repo.py::TestRepository::test_notifications", "tests/integration/test_repos_repo.py::TestContents::test_update", "tests/integration/test_repos_repo.py::TestRepository::test_original_license", "tests/integration/test_repos_repo.py::TestRepoCommit::test_patch", "tests/integration/test_repos_repo.py::TestHook::test_delete", "tests/integration/test_repos_repo.py::TestRepository::test_refs", "tests/integration/test_repos_repo.py::TestRepository::test_refs_raises_unprocessable_exception", "tests/integration/test_repos_repo.py::TestHook::test_edit", "tests/integration/test_repos_repo.py::TestRepository::test_release", "tests/integration/test_repos_repo.py::TestRepository::test_project", "tests/integration/test_repos_repo.py::TestRepository::test_release_from_tag", 
"tests/integration/test_repos_repo.py::TestHook::test_ping", "tests/integration/test_repos_repo.py::TestRepository::test_releases", "tests/integration/test_repos_repo.py::TestRepository::test_projects", "tests/integration/test_repos_repo.py::TestHook::test_test", "tests/integration/test_repos_repo.py::TestRepository::test_remove_collaborator", "tests/integration/test_repos_repo.py::TestRepository::test_replace_topics", "tests/unit/test_repos_repo.py::TestRepository::test_create_label", "tests/unit/test_repos_repo.py::TestRepository::test_create_fork_to_organization", "tests/unit/test_repos_repo.py::TestRepository::test_create_label_required_color", "tests/integration/test_repos_repo.py::TestRepository::test_stargazers", "tests/unit/test_repos_repo.py::TestRepository::test_create_hook", "tests/integration/test_repos_repo.py::TestRepository::test_statuses", "tests/unit/test_repos_repo.py::TestRepository::test_create_label_required_name", "tests/integration/test_repos_repo.py::TestRepository::test_subscribe", "tests/unit/test_repos_repo.py::TestRepository::test_create_hook_requires_valid_config", "tests/unit/test_repos_repo.py::TestRepository::test_create_hook_requires_valid_name", "tests/unit/test_repos_repo.py::TestRepository::test_create_ref_requires_a_non_None_sha", "tests/unit/test_repos_repo.py::TestRepository::test_create_label_required_name_and_color", "tests/unit/test_repos_repo.py::TestRepository::test_create_hook_requires_valid_name_and_config", "tests/unit/test_repos_repo.py::TestRepository::test_create_ref_requires_a_reference_start_with_refs", "tests/unit/test_repos_repo.py::TestRepository::test_create_milestone", "tests/unit/test_repos_repo.py::TestRepository::test_create_issue", "tests/unit/test_repos_repo.py::TestRepository::test_create_ref_requires_a_reference_with_two_slashes", "tests/unit/test_repos_repo.py::TestRepository::test_create_milestone_accepted_state", "tests/unit/test_repos_repo.py::TestRepository::test_create_ref_requires_a_truthy_sha", 
"tests/unit/test_repos_repo.py::TestRepository::test_create_project", "tests/unit/test_repos_repo.py::TestRepository::test_create_issue_multiple_assignees", "tests/unit/test_repos_repo.py::TestRepository::test_create_status", "tests/unit/test_repos_repo.py::TestRepository::test_create_pull", "tests/unit/test_repos_repo.py::TestRepository::test_create_status_required_sha", "tests/unit/test_repos_repo.py::TestRepository::test_create_pull_from_issue", "tests/unit/test_repos_repo.py::TestRepository::test_create_issue_require_valid_issue", "tests/unit/test_repos_repo.py::TestRepository::test_create_status_required_sha_and_state", "tests/unit/test_repos_repo.py::TestRepository::test_create_pull_from_issue_required_issue_number", "tests/unit/test_repos_repo.py::TestRepository::test_create_key", "tests/unit/test_repos_repo.py::TestRepository::test_create_pull_private", "tests/unit/test_repos_repo.py::TestRepository::test_create_status_required_state", "tests/unit/test_repos_repo.py::TestRepository::test_create_key_readonly", "tests/unit/test_repos_repo.py::TestRepository::test_create_tag_that_is_not_lightweight", "tests/integration/test_repos_repo.py::TestComparison::test_diff", "tests/unit/test_repos_repo.py::TestRepository::test_create_pull_private_required_data", "tests/unit/test_repos_repo.py::TestRepository::test_create_key_requires_a_valid_key", "tests/unit/test_repos_repo.py::TestRepository::test_create_tree", "tests/unit/test_repos_repo.py::TestRepository::test_create_key_requires_a_valid_title", "tests/unit/test_repos_repo.py::TestRepository::test_create_tree_rejects_invalid_trees", "tests/unit/test_repos_repo.py::TestRepository::test_create_ref", "tests/unit/test_repos_repo.py::TestRepository::test_create_key_requires_a_valid_title_and_key", "tests/unit/test_repos_repo.py::TestRepository::test_git_commit_required_sha", "tests/unit/test_repos_repo.py::TestRepository::test_create_tree_with_base_tree", 
"tests/unit/test_repos_repo.py::TestRepository::test_delete_key", "tests/unit/test_repos_repo.py::TestRepository::test_hook", "tests/unit/test_repos_repo.py::TestRepository::test_delete_key_required_id", "tests/unit/test_repos_repo.py::TestRepository::test_delete", "tests/unit/test_repos_repo.py::TestRepository::test_issue_required_number", "tests/unit/test_repos_repo.py::TestRepository::test_delete_subscription", "tests/unit/test_repos_repo.py::TestRepository::test_hook_required_hook", "tests/unit/test_repos_repo.py::TestRepository::test_deployment", "tests/unit/test_repos_repo.py::TestRepository::test_key", "tests/unit/test_repos_repo.py::TestRepository::test_import_issue", "tests/unit/test_repos_repo.py::TestRepository::test_deployment_requires_positive_int", "tests/unit/test_repos_repo.py::TestRepository::test_key_requires_positive_id", "tests/unit/test_repos_repo.py::TestRepository::test_imported_issue", "tests/unit/test_repos_repo.py::TestRepository::test_label", "tests/unit/test_repos_repo.py::TestRepository::test_directory_contents", "tests/unit/test_repos_repo.py::TestRepository::test_label_required_name", "tests/unit/test_repos_repo.py::TestRepository::test_is_assignee", "tests/unit/test_repos_repo.py::TestRepository::test_directory_contents_with_ref", "tests/unit/test_repos_repo.py::TestRepository::test_latest_pages_build", "tests/unit/test_repos_repo.py::TestRepository::test_edit", "tests/unit/test_repos_repo.py::TestRepository::test_is_assignee_required_username", "tests/unit/test_repos_repo.py::TestRepository::test_edit_required_name", "tests/unit/test_repos_repo.py::TestRepository::test_is_collaborator", "tests/unit/test_repos_repo.py::TestRepository::test_latest_release", "tests/unit/test_repos_repo.py::TestRepository::test_file_contents", "tests/unit/test_repos_repo.py::TestRepository::test_mark_notifications", "tests/unit/test_repos_repo.py::TestRepository::test_is_collaborator_required_username", 
"tests/unit/test_repos_repo.py::TestRepository::test_git_commit", "tests/unit/test_repos_repo.py::TestRepository::test_issue", "tests/unit/test_repos_repo.py::TestRepository::test_mark_notifications_required_last_read", "tests/unit/test_repos_repo.py::TestRepository::test_readme", "tests/unit/test_repos_repo.py::TestRepository::test_merge_no_message", "tests/unit/test_repos_repo.py::TestRepository::test_merge", "tests/unit/test_repos_repo.py::TestRepository::test_ref", "tests/unit/test_repos_repo.py::TestRepository::test_milestone", "tests/unit/test_repos_repo.py::TestRepository::test_ref_required_ref", "tests/unit/test_repos_repo.py::TestRepository::test_str", "tests/unit/test_repos_repo.py::TestRepository::test_milestone_requires_positive_id", "tests/unit/test_repos_repo.py::TestRepository::test_release_from_tag", "tests/unit/test_repos_repo.py::TestRepository::test_subscription", "tests/unit/test_repos_repo.py::TestRepository::test_pages", "tests/unit/test_repos_repo.py::TestRepository::test_remove_collaborator", "tests/unit/test_repos_repo.py::TestRepository::test_parent", "tests/unit/test_repos_repo.py::TestRepository::test_tag", "tests/unit/test_repos_repo.py::TestRepository::test_remove_collaborator_required_username", "tests/unit/test_repos_repo.py::TestRepository::test_permission", "tests/unit/test_repos_repo.py::TestRepository::test_tag_required_sha", "tests/unit/test_repos_repo.py::TestRepository::test_replace_topics", "tests/unit/test_repos_repo.py::TestRepository::test_topics", "tests/unit/test_repos_repo.py::TestRepository::test_project", "tests/unit/test_repos_repo.py::TestRepository::test_source", "tests/unit/test_repos_repo.py::TestRepository::test_pull_request", "tests/unit/test_repos_repo.py::TestRepository::test_pull_request_required_number", "tests/integration/test_repos_repo.py::TestComparison::test_patch", "tests/unit/test_repos_repo.py::TestRepository::test_weekly_commit_count", 
"tests/unit/test_repos_repo.py::TestRepositoryIterator::test_collaborators_valid_affiliation", "tests/unit/test_repos_repo.py::TestRepository::test_tree_required_sha", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_assignees", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_comments", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_contributor_statistics", "tests/unit/test_repos_repo.py::TestRepository::test_add_collaborator", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_branches", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_commit_activity", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_branches_protected", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_contributors", "tests/unit/test_repos_repo.py::TestRepository::test_add_null_collaborator", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_commits", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_code_frequency", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_contributors_with_anon", "tests/unit/test_repos_repo.py::TestRepository::test_asset", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_collaborators", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_commits_per_page", "tests/unit/test_repos_repo.py::TestRepository::test_asset_requires_a_positive_id", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_deployments", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_events", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_collaborators_invalid_affiliation", "tests/unit/test_repos_repo.py::TestRepository::test_create_branch_ref", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_commits_sha_path", "tests/unit/test_repos_repo.py::TestRepository::test_create_file", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_milestones", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_forks", 
"tests/unit/test_repos_repo.py::TestRepositoryIterator::test_commits_since_until_datetime", "tests/unit/test_repos_repo.py::TestRepository::test_create_file_required_content", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_hooks", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_network_events", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_imported_issues", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_pull_requests_ignore_invalid_state", "tests/unit/test_repos_repo.py::TestRepository::test_create_fork", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_notifications", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_issue_events", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_refs", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_pages_builds", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_issues", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_statuses", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_refs_with_a_subspace", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_projects", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_keys", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_statuses_requires_a_sha", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_releases", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_pull_requests", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_subscribers", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_stargazers", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_labels", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_add_collaborator", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_key", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_file", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_tags", 
"tests/unit/test_repos_repo.py::TestRepositoryIterator::test_languages", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_fork", "tests/unit/test_repos_repo.py::TestRepositoryIterator::test_teams", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_ref", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_project", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_hook", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_edit", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_status", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_pull", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_issue", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_create_pull_from_issue", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_delete_key", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_imported_issue", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_hook", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_keys", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_imported_issues", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_hooks", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_delete_subscription", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_import_issue", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_is_collaborator", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_mark_notifications", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_notifications", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_key", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_replace_topics", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_merge", 
"tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_pages_builds", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_subscription", "tests/unit/test_repos_repo.py::TestContents::test_html_url", "tests/unit/test_repos_repo.py::TestContents::test_delete", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_remove_collaborator", "tests/unit/test_repos_repo.py::TestContents::test_str", "tests/unit/test_repos_repo.py::TestRepositoryRequiresAuth::test_teams", "tests/unit/test_repos_repo.py::TestContents::test_git_url", "tests/unit/test_repos_repo.py::TestContents::test_update", "tests/unit/test_repos_repo.py::TestHook::test_edit", "tests/unit/test_repos_repo.py::TestContentsRequiresAuth::test_delete", "tests/unit/test_repos_repo.py::TestHook::test_delete", "tests/unit/test_repos_repo.py::TestContents::test_update_required_content", "tests/unit/test_repos_repo.py::TestContentsRequiresAuth::test_update", "tests/unit/test_repos_repo.py::TestHookRequiresAuth::test_delete", "tests/unit/test_repos_repo.py::TestHook::test_str", "tests/unit/test_repos_repo.py::TestHook::test_ping", "tests/unit/test_repos_repo.py::TestHook::test_edit_failed", "tests/unit/test_repos_repo.py::TestHookRequiresAuth::test_ping", "tests/unit/test_repos_repo.py::TestHookRequiresAuth::test_test", "tests/unit/test_repos_repo.py::TestHook::test_test", "tests/unit/test_repos_repo.py::TestRepoComment::test_update", "tests/unit/test_repos_repo.py::TestHookRequiresAuth::test_edit", "tests/unit/test_repos_repo.py::TestRepoComment::test_delete", "tests/unit/test_repos_repo.py::TestRepoCommentRequiresAuth::test_delete", "tests/unit/test_repos_repo.py::TestRepoCommit::test_diff", "tests/unit/test_repos_repo.py::TestRepoComment::test_str", "tests/unit/test_repos_repo.py::TestRepoCommentRequiresAuth::test_update", "tests/unit/test_repos_repo.py::TestRepoCommit::test_patch", "tests/unit/test_repos_repo.py::TestRepoCommit::test_str", 
"tests/unit/test_repos_repo.py::TestComparison::test_patch", "tests/unit/test_repos_repo.py::TestComparison::test_diff", "tests/unit/test_repos_repo.py::TestComparison::test_str", "tests/unit/test_repos_repo.py::TestRepositoryCompatibility_2_12::test_repository" ]
[]
BSD 3-Clause "New" or "Revised" License
2,776
[ "github3/repos/repo.py" ]
[ "github3/repos/repo.py" ]
fennekki__cdparacord-32
bcc53aad7774868d42e36cddc42da72abdf990c2
2018-07-14 09:40:21
ce0444604ac0d87ec586fd94b6c62096b10c3657
diff --git a/cdparacord/config.py b/cdparacord/config.py index 962327b..c7e52e3 100644 --- a/cdparacord/config.py +++ b/cdparacord/config.py @@ -30,13 +30,11 @@ class Config: __default_config = { # Config for the encoder 'encoder': { - 'lame': { - 'parameters': [ - '-V2', - '${one_file}', - '${out_file}' - ] - } + 'lame': [ + '-V2', + '${one_file}', + '${out_file}' + ] }, # Tasks follow the format of encoder # post_rip are run after an individual file has been ripped to a diff --git a/cdparacord/dependency.py b/cdparacord/dependency.py index 9f47b51..b2f165e 100644 --- a/cdparacord/dependency.py +++ b/cdparacord/dependency.py @@ -27,7 +27,7 @@ class Dependency: # dir. I don't think anyone wants that but they... might... return name - for path in os.environ["PATH"].split(os.pathsep): + for path in os.environ['PATH'].split(os.pathsep): path = path.strip('"') binname = os.path.join(path, name) if os.path.isfile(binname) and os.access(binname, os.X_OK): @@ -35,18 +35,53 @@ class Dependency: # If we haven't returned, the executable was not found raise DependencyError( - "Executable {} not found or not executable".format(name)) + 'Executable {} not found or not executable'.format(name)) + + def _verify_action_params(self, action): + """Confirm certain things about action configuration.""" + if len(action) > 1: + multiple_actions = ', '.join(action.keys()) + raise DependencyError( + 'Tried to configure multiple actions in one dict: {}' + .format(multiple_actions)) + + if len(action) < 1: + raise DependencyError( + 'Configuration opened an action dict but it had no keys') + + action_key = list(action.keys())[0] + action_params = action[action_key] + + if type(action_params) is not list: + raise DependencyError( + '{} configuration has type {} (list expected)' + .format(action_key, type(action_params).__name__)) + + for item in action_params: + if type(item) is not str: + raise DependencyError( + 'Found {} parameter {} with type {} (str expected)' + .format(action_key, 
item, type(item).__name__)) def _discover(self): """Discover dependencies and ensure they exist.""" - # Find the executables + # Find the executables, and verify parameters for post-actions + # and encoder self._encoder = self._find_executable( list(self._config.get('encoder').keys())[0]) + + self._verify_action_params(self._config.get('encoder')) + self._editor = self._find_executable(self._config.get('editor')) self._cdparanoia = self._find_executable( self._config.get('cdparanoia')) + for post_action in ('post_rip', 'post_encode', 'post_finished'): + for action in self._config.get(post_action): + self._find_executable(list(action.keys())[0]) + self._verify_action_params(action) + # Ensure discid is importable try: import discid @@ -54,7 +89,7 @@ class Dependency: # it would be ridiculous as it only depends on documented # behaviour and only raises a further exception. except OSError as e: # pragma: no cover - raise DependencyError("Could not find libdiscid") from e + raise DependencyError('Could not find libdiscid') from e @property def encoder(self):
Could not find "parameters". <!-- Describe your issue and what you'd like to be done about it quickly. If you intend to implement the solution yourself, you don't need to file a bug before a pull request. Note that the maintainer will only take note of issues added to the "continuous backlog" project. Click "Projects -> continous backlog" on the right hand side of the issue box and correctly tag your issue as "bug", "enhancement" or "question" depending on its purpose. This way it'll be visible in the work queue. --> Under some quite unknown circumstances, with at least one specific album (lnTlccHq6F8XNcvNFafPUyaw1mA-), a fresh git clone fails to encode on the first track and crashes. The script correctly rips track 1 and when it starts to encode there's an issue. After the rip... Prints: ``` Could not find "parameters". Can't init infile 'parameters' ``` And then raises: ``` File "cdparacord/rip.py", line 104, in _encode_track raise RipError('Failed to encode track {}'.format(track.filename)) ```
fennekki/cdparacord
diff --git a/tests/test_config.py b/tests/test_config.py index 0bc761c..1a85588 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -185,4 +185,40 @@ def test_update_config_unknown_keys(mock_temp_home, capsys): c.update({'invalid_key': True}, quiet_ignore=False) out, err = capsys.readouterr() - assert err == "Warning: Unknown configuration key invalid_key\n" + assert err == 'Warning: Unknown configuration key invalid_key\n' + +def test_ensure_default_encoder_keys_are_strings(mock_temp_home): + """Test default encoder configuration.""" + from cdparacord import config + + c = config.Config() + + assert len(c.get('encoder')) == 1 + + for encoder in c.get('encoder'): + encoder_params = c.get('encoder')[encoder] + # If it's not a list something's wrong + assert type(encoder_params) is list + + for item in encoder_params: + # And the params should be strings + assert type(item) is str + +def test_ensure_default_postaction_keys_are_strings(mock_temp_home): + """Test default encoder configuration.""" + from cdparacord import config + + c = config.Config() + + for post_action in ('post_rip', 'post_encode', 'post_finished'): + for action in c.get(post_action): + assert len(action) == 1 + + for action_key in action: + action_params = action[action_key] + # If it's not a list something's wrong + assert type(action_params) is list + + for item in action_params: + # And the params should be strings + assert type(item) is str diff --git a/tests/test_dependency.py b/tests/test_dependency.py index 96c0519..e7e92df 100644 --- a/tests/test_dependency.py +++ b/tests/test_dependency.py @@ -9,45 +9,57 @@ from cdparacord.dependency import Dependency, DependencyError @pytest.fixture -def mock_external_binary(): +def mock_config_external(): + """Mock the Config class such that it returns self.param. + + In addition, querying for encoder results in a dict that contains + an empty list in the key self.param. 
+ """ + class MockConfig: + def __init__(self, param): + self.param = param + self.encoder = {self.param: []} + self.post = [{self.param: []}] + + def get(self, name): + # Maybe we should write a fake config file but there are + # Huge issues with mocking the config module... + if name == 'encoder': + return self.encoder + if name in ('post_rip', 'post_encode', 'post_finished'): + return self.post + return self.param + return MockConfig + + [email protected] +def mock_external_encoder(mock_config_external): """Mock an external dependency binary. - + Create a file, set it to be executable and return it as an - ostensible external binary. + ostensible external binary via configuration. """ with NamedTemporaryFile(prefix='cdparacord-unittest-') as f: os.chmod(f.name, stat.S_IXUSR) - yield f.name + conf = mock_config_external(f.name) + yield conf [email protected] -def mock_config_external(): - """Mock the Config class such that it always returns given name.""" - def get_config(mockbin): - class MockConfig: - def get(self, name): - # Maybe we should write a fake config file but there are - # Huge issues with mocking the config module... 
- if name == 'encoder': - return {mockbin: []} - return mockbin - return MockConfig() - return get_config - - -def test_find_valid_absolute_dependencies(mock_external_binary, mock_config_external): + +def test_find_valid_absolute_dependencies(mock_external_encoder): """Finds fake dependencies that exist by absolute path.""" - - Dependency(mock_config_external(mock_external_binary)) + Dependency(mock_external_encoder) -def test_find_valid_dependencies_in_path(mock_external_binary, mock_config_external, monkeypatch): +def test_find_valid_dependencies_in_path(mock_external_encoder, monkeypatch): """Finds fake dependencies that exist in $PATH.""" - dirname, basename = os.path.split(mock_external_binary) + dirname, basename = os.path.split(mock_external_encoder.param) # Set PATH to only contain the directory our things are in monkeypatch.setenv("PATH", dirname) - Dependency(mock_config_external(basename)) + conf = mock_external_encoder + conf.param = basename + Dependency(conf) def test_fail_to_find_dependencies(mock_config_external): @@ -55,28 +67,59 @@ def test_fail_to_find_dependencies(mock_config_external): # This file should not be executable by default so the finding # should fail with pytest.raises(DependencyError): - Dependency(mock_config_external(f.name)) + conf = mock_config_external(f.name) + Dependency(conf) -def test_get_encoder(mock_config_external, mock_external_binary): +def test_get_encoder(mock_external_encoder): """Get the 'encoder' property.""" - deps = Dependency(mock_config_external(mock_external_binary)) + deps = Dependency(mock_external_encoder) # It's an absolute path so the value should be the same - assert deps.encoder == mock_external_binary + assert deps.encoder == mock_external_encoder.param -def test_get_editor(mock_config_external, mock_external_binary): +def test_get_editor(mock_external_encoder): """Get the 'editor' property.""" - deps = Dependency(mock_config_external(mock_external_binary)) + deps = Dependency(mock_external_encoder) # 
It's an absolute path so the value should be the same - assert deps.editor == mock_external_binary + assert deps.editor == mock_external_encoder.param -def test_get_cdparanoia(mock_config_external, mock_external_binary): +def test_get_cdparanoia(mock_external_encoder): """Get the 'cdparanoia' property.""" - deps = Dependency(mock_config_external(mock_external_binary)) + deps = Dependency(mock_external_encoder) # It's an absolute path so the value should be the same - assert deps.cdparanoia == mock_external_binary + assert deps.cdparanoia == mock_external_encoder.param + +def test_verify_action_params(mock_external_encoder): + """Ensure encoder and post-action parameter verification works.""" + + conf = mock_external_encoder + deps = Dependency(conf) + + # Dict can only have one key + invalid_input = {'a': [], 'b': []} + with pytest.raises(DependencyError): + deps._verify_action_params(invalid_input) + + # Dict mustn't be empty + invalid_input = {} + with pytest.raises(DependencyError): + deps._verify_action_params(invalid_input) + + # Type of encoder param container should be list + invalid_input = {conf.param: {'ah', 'beh'}} + with pytest.raises(DependencyError): + deps._verify_action_params(invalid_input) + + # Type of encoder param items should be str + invalid_input = {conf.param: [1]} + with pytest.raises(DependencyError): + deps._verify_action_params(invalid_input) + + # Test valid + deps._verify_action_params({'valid': ['totally', 'valid']}) +
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 2 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "Pipfile", "pip_packages": [ "pytest", "coverage", "tox", "vulture" ], "pre_install": [ "apt-get update", "apt-get install -y libdiscid0" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 -e git+https://github.com/fennekki/cdparacord.git@bcc53aad7774868d42e36cddc42da72abdf990c2#egg=cdparacord certifi==2021.5.30 click==8.0.4 coverage==6.2 discid==1.2.0 distlib==0.3.9 filelock==3.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 musicbrainzngs==0.7.1 mutagen==1.45.1 packaging==21.3 pipfile==0.0.2 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 PyYAML==6.0.1 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 virtualenv==20.17.1 vulture==2.8 zipp==3.6.0
name: cdparacord channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - pipfile=0.0.2=py_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - click==8.0.4 - coverage==6.2 - discid==1.2.0 - distlib==0.3.9 - filelock==3.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - musicbrainzngs==0.7.1 - mutagen==1.45.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==6.0.1 - six==1.17.0 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - virtualenv==20.17.1 - vulture==2.8 - zipp==3.6.0 prefix: /opt/conda/envs/cdparacord
[ "tests/test_config.py::test_ensure_default_encoder_keys_are_strings", "tests/test_dependency.py::test_verify_action_params" ]
[ "tests/test_config.py::test_fail_to_create_config_dir", "tests/test_config.py::test_fail_to_open_config_file" ]
[ "tests/test_config.py::test_create_config", "tests/test_config.py::test_get_encoder", "tests/test_config.py::test_fail_to_get_variable", "tests/test_config.py::test_read_config_file", "tests/test_config.py::test_read_invalid_config", "tests/test_config.py::test_update_config_no_unknown_keys", "tests/test_config.py::test_update_config_unknown_keys", "tests/test_config.py::test_ensure_default_postaction_keys_are_strings", "tests/test_dependency.py::test_find_valid_absolute_dependencies", "tests/test_dependency.py::test_find_valid_dependencies_in_path", "tests/test_dependency.py::test_fail_to_find_dependencies", "tests/test_dependency.py::test_get_encoder", "tests/test_dependency.py::test_get_editor", "tests/test_dependency.py::test_get_cdparanoia" ]
[]
BSD 2-Clause "Simplified" License
2,777
[ "cdparacord/config.py", "cdparacord/dependency.py" ]
[ "cdparacord/config.py", "cdparacord/dependency.py" ]
zalando-stups__senza-521
d5477538a198df36914cdd2dbe9e10accb4dec5f
2018-07-14 17:31:04
e9f84724628b4761f8d5da4d37a2993f11d6433b
coveralls: [![Coverage Status](https://coveralls.io/builds/17987711/badge)](https://coveralls.io/builds/17987711) Coverage remained the same at 89.549% when pulling **e0c174c48d8db1963e113b7ca5a6f647b20c2f16 on lmineiro:fix-elastigroup-healthchecktype** into **d5477538a198df36914cdd2dbe9e10accb4dec5f on zalando-stups:master**. jmcs: :+1: lmineiro: 👍
diff --git a/requirements.txt b/requirements.txt index 4b612cc..969c68d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,6 +7,6 @@ dnspython>=1.15.0 stups-pierone>=1.0.34 boto3>=1.3.0 botocore>=1.4.10 -pytest>=2.7.3 +pytest>=3.6.3 raven typing diff --git a/setup.py b/setup.py index b038fc5..4d36574 100755 --- a/setup.py +++ b/setup.py @@ -131,7 +131,7 @@ def setup_package(): install_requires=install_reqs, setup_requires=['flake8'], cmdclass=cmdclass, - tests_require=['pytest-cov', 'pytest', 'mock', 'responses'], + tests_require=['pytest-cov', 'pytest>=3.6.3', 'mock', 'responses'], command_options=command_options, entry_points={'console_scripts': CONSOLE_SCRIPTS, 'senza.templates': ['bgapp = senza.templates.bgapp', diff --git a/spotinst/components/elastigroup.py b/spotinst/components/elastigroup.py index aa916e9..8280ed4 100644 --- a/spotinst/components/elastigroup.py +++ b/spotinst/components/elastigroup.py @@ -20,7 +20,12 @@ from spotinst import MissingSpotinstAccount SPOTINST_LAMBDA_FORMATION_ARN = 'arn:aws:lambda:{}:178579023202:function:spotinst-cloudformation' SPOTINST_API_URL = 'https://api.spotinst.io' -ELASTIGROUP_DEFAULT_STRATEGY = {"risk": 100, "availabilityVsCost": "balanced", "utilizeReservedInstances": True} +ELASTIGROUP_DEFAULT_STRATEGY = { + "risk": 100, + "availabilityVsCost": "balanced", + "utilizeReservedInstances": True, + "fallbackToOd": True, +} ELASTIGROUP_DEFAULT_PRODUCT = "Linux/UNIX" @@ -33,7 +38,7 @@ def component_elastigroup(definition, configuration, args, info, force, account_ """ definition = ensure_keys(ensure_keys(definition, "Resources"), "Mappings", "Senza", "Info") if "SpotinstAccessToken" not in definition["Mappings"]["Senza"]["Info"]: - raise click.UsageError("You have to specificy your SpotinstAccessToken attribute inside the SenzaInfo " + raise click.UsageError("You have to specify your SpotinstAccessToken attribute inside the SenzaInfo " "to be able to use Elastigroups") configuration = ensure_keys(configuration, 
"Elastigroup") @@ -332,6 +337,7 @@ def extract_load_balancer_name(configuration, elastigroup_config: dict): if "ElasticLoadBalancer" in configuration: load_balancer_refs = configuration.pop("ElasticLoadBalancer") + health_check_type = "ELB" if isinstance(load_balancer_refs, str): load_balancers.append({ "name": {"Ref": load_balancer_refs}, @@ -344,6 +350,7 @@ def extract_load_balancer_name(configuration, elastigroup_config: dict): "type": "CLASSIC" }) if "ElasticLoadBalancerV2" in configuration: + health_check_type = "TARGET_GROUP" load_balancer_refs = configuration.pop("ElasticLoadBalancerV2") if isinstance(load_balancer_refs, str): load_balancers.append({ @@ -358,16 +365,13 @@ def extract_load_balancer_name(configuration, elastigroup_config: dict): }) if len(load_balancers) > 0: - # use ELB health check by default when there are LBs - health_check_type = "ELB" launch_spec_config["loadBalancersConfig"] = {"loadBalancers": load_balancers} - if "healthCheckType" in launch_spec_config: - health_check_type = launch_spec_config["healthCheckType"] - elif "HealthCheckType" in configuration: - health_check_type = configuration["HealthCheckType"] + health_check_type = launch_spec_config.get("healthCheckType", + configuration.get("HealthCheckType", health_check_type)) + grace_period = launch_spec_config.get("healthCheckGracePeriod", + configuration.get('HealthCheckGracePeriod', 300)) launch_spec_config["healthCheckType"] = health_check_type - grace_period = launch_spec_config.get("healthCheckGracePeriod", configuration.get('HealthCheckGracePeriod', 300)) launch_spec_config["healthCheckGracePeriod"] = grace_period @@ -432,20 +436,16 @@ def extract_instance_types(configuration, elastigroup_config): are no SpotAlternatives the Elastigroup will have the same ondemand type as spot alternative If there's already a compute.instanceTypes config it will be left untouched """ - elastigroup_config = ensure_keys(ensure_keys(elastigroup_config, "strategy"), "compute") + 
elastigroup_config = ensure_keys(elastigroup_config, "compute") compute_config = elastigroup_config["compute"] - instance_type = configuration.pop("InstanceType", None) + + if "InstanceType" not in configuration: + raise click.UsageError("You need to specify the InstanceType attribute to be able to use Elastigroups") + instance_type = configuration.pop("InstanceType") spot_alternatives = configuration.pop("SpotAlternatives", None) if "instanceTypes" not in compute_config: - if not (instance_type or spot_alternatives): - raise click.UsageError("You have to specify one of InstanceType or SpotAlternatives") instance_types = {} - strategy = elastigroup_config["strategy"] - if instance_type: - instance_types.update({"ondemand": instance_type}) - strategy.update({"fallbackToOd": True}) - else: - strategy.update({"fallbackToOd": False}) + instance_types.update({"ondemand": instance_type}) if spot_alternatives: instance_types.update({"spot": spot_alternatives}) else:
Fix Elastigroup healthCheckType The current implementation always set the Elastigroup's healthCheckType to "ELB" if there are any load balancers, regardless of their types. The [API clearly states](https://api.spotinst.com/elastigroup/amazon-web-services-2/create/#compute.launchSpecification.healthCheckType) that ELB is for classic ELBs and TARGET_GROUP should be used for ALBs. See [Spotinst's recommendation](https://github.com/zalando-stups/senza/pull/516#pullrequestreview-136726224).
zalando-stups/senza
diff --git a/tests/test_elastigroup.py b/tests/test_elastigroup.py index f77ccc1..022c0a8 100644 --- a/tests/test_elastigroup.py +++ b/tests/test_elastigroup.py @@ -442,7 +442,7 @@ def test_load_balancers(): "healthCheckGracePeriod": 300, }}}, }, - { # 1 application load balancer from Taupage, healthcheck type set to ELB (default grace period) + { # 1 application load balancer from Taupage, healthcheck type set to TARGET_GROUP (default grace period) "input": {"ElasticLoadBalancerV2": "bar"}, "given_config": {}, "expected_config": {"compute": {"launchSpecification": { @@ -451,11 +451,12 @@ def test_load_balancers(): {"arn": {"Ref": "barTargetGroup"}, "type": "TARGET_GROUP"}, ], }, - "healthCheckType": "ELB", + "healthCheckType": "TARGET_GROUP", "healthCheckGracePeriod": 300, }}}, }, - { # multiple application load balancers from Taupage, healthcheck type set to ELB (default grace period) + { # multiple application load balancers from Taupage, healthcheck type set to TARGET_GROUP + # (default grace period) "input": {"ElasticLoadBalancerV2": ["foo", "bar"]}, "given_config": {}, "expected_config": {"compute": {"launchSpecification": { @@ -465,11 +466,11 @@ def test_load_balancers(): {"arn": {"Ref": "barTargetGroup"}, "type": "TARGET_GROUP"}, ], }, - "healthCheckType": "ELB", + "healthCheckType": "TARGET_GROUP", "healthCheckGracePeriod": 300, }}}, }, - { # mixed load balancers from Taupage, healthcheck type set to ELB and custom Taupage grace period + { # mixed load balancers from Taupage, healthcheck type set to TARGET_GROUP and custom Taupage grace period "input": { "ElasticLoadBalancer": "foo", "ElasticLoadBalancerV2": "bar", @@ -483,7 +484,7 @@ def test_load_balancers(): {"arn": {"Ref": "barTargetGroup"}, "type": "TARGET_GROUP"}, ], }, - "healthCheckType": "ELB", + "healthCheckType": "TARGET_GROUP", "healthCheckGracePeriod": 42, }}}, }, @@ -598,9 +599,11 @@ def test_extract_security_group_ids(monkeypatch): assert test_case["expected_sgs"] == 
got["compute"]["launchSpecification"].get("securityGroupIds") -def test_missing_instance_types(): +def test_missing_instance_type(): with pytest.raises(click.UsageError): extract_instance_types({}, {}) + with pytest.raises(click.UsageError): + extract_instance_types({"SpotAlternatives": ["foo", "bar", "baz"]}, {}) def test_extract_instance_types(): @@ -608,20 +611,12 @@ def test_extract_instance_types(): { # minimum accepted behavior, on demand instance type from typical Senza "input": {"InstanceType": "foo"}, "given_config": {}, - "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["foo"]}}, - "strategy": {"fallbackToOd": True}}, + "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["foo"]}}}, }, { # both on demand instance type from typical Senza and spot alternatives specified "input": {"InstanceType": "foo", "SpotAlternatives": ["bar", "baz"]}, "given_config": {}, - "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["bar", "baz"]}}, - "strategy": {"fallbackToOd": True}}, - }, - { # only spot alternatives specified - "input": {"SpotAlternatives": ["foo", "bar"]}, - "given_config": {}, - "expected_config": {"compute": {"instanceTypes": {"spot": ["foo", "bar"]}}, - "strategy": {"fallbackToOd": False}}, + "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["bar", "baz"]}}}, }, ] for test_case in test_cases:
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "mock", "responses" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
arrow==1.3.0 boto3==1.37.23 botocore==1.37.23 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 clickclick==20.10.2 coverage==7.8.0 dnspython==2.7.0 exceptiongroup==1.2.2 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 jmespath==1.0.1 mock==5.2.0 packaging==24.2 pluggy==1.5.0 pystache==0.6.8 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 PyYAML==6.0.2 raven==6.10.0 requests==2.32.3 responses==0.25.7 s3transfer==0.11.4 six==1.17.0 stups-cli-support==1.1.22 stups-pierone==1.1.56 -e git+https://github.com/zalando-stups/senza.git@d5477538a198df36914cdd2dbe9e10accb4dec5f#egg=stups_senza stups-tokens==1.1.19 stups-zign==1.2 tomli==2.2.1 types-python-dateutil==2.9.0.20241206 typing==3.7.4.3 urllib3==1.26.20 zipp==3.21.0
name: senza channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - arrow==1.3.0 - boto3==1.37.23 - botocore==1.37.23 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - clickclick==20.10.2 - coverage==7.8.0 - dnspython==2.7.0 - exceptiongroup==1.2.2 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jmespath==1.0.1 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - pystache==0.6.8 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - raven==6.10.0 - requests==2.32.3 - responses==0.25.7 - s3transfer==0.11.4 - six==1.17.0 - stups-cli-support==1.1.22 - stups-pierone==1.1.56 - stups-tokens==1.1.19 - stups-zign==1.2 - tomli==2.2.1 - types-python-dateutil==2.9.0.20241206 - typing==3.7.4.3 - urllib3==1.26.20 - zipp==3.21.0 prefix: /opt/conda/envs/senza
[ "tests/test_elastigroup.py::test_load_balancers", "tests/test_elastigroup.py::test_missing_instance_type", "tests/test_elastigroup.py::test_extract_instance_types" ]
[]
[ "tests/test_elastigroup.py::test_component_elastigroup_defaults", "tests/test_elastigroup.py::test_missing_access_token", "tests/test_elastigroup.py::test_spotinst_account_resolution", "tests/test_elastigroup.py::test_spotinst_account_resolution_failure", "tests/test_elastigroup.py::test_block_mappings", "tests/test_elastigroup.py::test_auto_scaling_rules", "tests/test_elastigroup.py::test_detailed_monitoring", "tests/test_elastigroup.py::test_prediction_strategy", "tests/test_elastigroup.py::test_autoscaling_capacity", "tests/test_elastigroup.py::test_product", "tests/test_elastigroup.py::test_standard_tags", "tests/test_elastigroup.py::test_extract_subnets", "tests/test_elastigroup.py::test_public_ips", "tests/test_elastigroup.py::test_extract_image_id", "tests/test_elastigroup.py::test_extract_security_group_ids", "tests/test_elastigroup.py::test_extract_instance_profile" ]
[]
Apache License 2.0
2,778
[ "setup.py", "spotinst/components/elastigroup.py", "requirements.txt" ]
[ "setup.py", "spotinst/components/elastigroup.py", "requirements.txt" ]
zalando-stups__senza-522
d5477538a198df36914cdd2dbe9e10accb4dec5f
2018-07-14 18:04:16
e9f84724628b4761f8d5da4d37a2993f11d6433b
coveralls: [![Coverage Status](https://coveralls.io/builds/17987902/badge)](https://coveralls.io/builds/17987902) Coverage remained the same at 89.549% when pulling **1c1c852d60a269bf5ab1c6e765940fdfde64fcd3 on lmineiro:fix-elastigroup-ondemand** into **d5477538a198df36914cdd2dbe9e10accb4dec5f on zalando-stups:master**. lmineiro: 👍 jmcs: :+1:
diff --git a/spotinst/components/elastigroup.py b/spotinst/components/elastigroup.py index aa916e9..dca20f4 100644 --- a/spotinst/components/elastigroup.py +++ b/spotinst/components/elastigroup.py @@ -20,7 +20,12 @@ from spotinst import MissingSpotinstAccount SPOTINST_LAMBDA_FORMATION_ARN = 'arn:aws:lambda:{}:178579023202:function:spotinst-cloudformation' SPOTINST_API_URL = 'https://api.spotinst.io' -ELASTIGROUP_DEFAULT_STRATEGY = {"risk": 100, "availabilityVsCost": "balanced", "utilizeReservedInstances": True} +ELASTIGROUP_DEFAULT_STRATEGY = { + "risk": 100, + "availabilityVsCost": "balanced", + "utilizeReservedInstances": True, + "fallbackToOd": True, +} ELASTIGROUP_DEFAULT_PRODUCT = "Linux/UNIX" @@ -432,20 +437,16 @@ def extract_instance_types(configuration, elastigroup_config): are no SpotAlternatives the Elastigroup will have the same ondemand type as spot alternative If there's already a compute.instanceTypes config it will be left untouched """ - elastigroup_config = ensure_keys(ensure_keys(elastigroup_config, "strategy"), "compute") + elastigroup_config = ensure_keys(elastigroup_config, "compute") compute_config = elastigroup_config["compute"] - instance_type = configuration.pop("InstanceType", None) + + if "InstanceType" not in configuration: + raise click.UsageError("You need to specify the InstanceType attribute to be able to use Elastigroups") + instance_type = configuration.pop("InstanceType") spot_alternatives = configuration.pop("SpotAlternatives", None) if "instanceTypes" not in compute_config: - if not (instance_type or spot_alternatives): - raise click.UsageError("You have to specify one of InstanceType or SpotAlternatives") instance_types = {} - strategy = elastigroup_config["strategy"] - if instance_type: - instance_types.update({"ondemand": instance_type}) - strategy.update({"fallbackToOd": True}) - else: - strategy.update({"fallbackToOd": False}) + instance_types.update({"ondemand": instance_type}) if spot_alternatives: 
instance_types.update({"spot": spot_alternatives}) else:
Make Elastigroup's On Demand not optional The current implementation allows users not to specify the `InstanceType` attribute which would later be translated to the Elastigroups `ondemand` attribute. [This attribute is mandatory according to the API](https://api.spotinst.com/elastigroup/amazon-web-services-2/create/#compute.instanceTypes.ondemand) and [Spotint's recommendations](https://github.com/zalando-stups/senza/pull/516#pullrequestreview-136726224). The stack would not be created. The fallbackToOd should always be set to True given that `ondemand` is mandatory.
zalando-stups/senza
diff --git a/tests/test_elastigroup.py b/tests/test_elastigroup.py index f77ccc1..0fcb9e4 100644 --- a/tests/test_elastigroup.py +++ b/tests/test_elastigroup.py @@ -598,9 +598,11 @@ def test_extract_security_group_ids(monkeypatch): assert test_case["expected_sgs"] == got["compute"]["launchSpecification"].get("securityGroupIds") -def test_missing_instance_types(): +def test_missing_instance_type(): with pytest.raises(click.UsageError): extract_instance_types({}, {}) + with pytest.raises(click.UsageError): + extract_instance_types({"SpotAlternatives": ["foo", "bar", "baz"]}, {}) def test_extract_instance_types(): @@ -608,20 +610,12 @@ def test_extract_instance_types(): { # minimum accepted behavior, on demand instance type from typical Senza "input": {"InstanceType": "foo"}, "given_config": {}, - "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["foo"]}}, - "strategy": {"fallbackToOd": True}}, + "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["foo"]}}}, }, { # both on demand instance type from typical Senza and spot alternatives specified "input": {"InstanceType": "foo", "SpotAlternatives": ["bar", "baz"]}, "given_config": {}, - "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["bar", "baz"]}}, - "strategy": {"fallbackToOd": True}}, - }, - { # only spot alternatives specified - "input": {"SpotAlternatives": ["foo", "bar"]}, - "given_config": {}, - "expected_config": {"compute": {"instanceTypes": {"spot": ["foo", "bar"]}}, - "strategy": {"fallbackToOd": False}}, + "expected_config": {"compute": {"instanceTypes": {"ondemand": "foo", "spot": ["bar", "baz"]}}}, }, ] for test_case in test_cases:
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "mock", "responses" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
arrow==1.3.0 boto3==1.37.23 botocore==1.37.23 certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 clickclick==20.10.2 coverage==7.8.0 dnspython==2.7.0 exceptiongroup==1.2.2 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 jmespath==1.0.1 mock==5.2.0 packaging==24.2 pluggy==1.5.0 pystache==0.6.8 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 PyYAML==6.0.2 raven==6.10.0 requests==2.32.3 responses==0.25.7 s3transfer==0.11.4 six==1.17.0 stups-cli-support==1.1.22 stups-pierone==1.1.56 -e git+https://github.com/zalando-stups/senza.git@d5477538a198df36914cdd2dbe9e10accb4dec5f#egg=stups_senza stups-tokens==1.1.19 stups-zign==1.2 tomli==2.2.1 types-python-dateutil==2.9.0.20241206 typing==3.7.4.3 urllib3==1.26.20 zipp==3.21.0
name: senza channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - arrow==1.3.0 - boto3==1.37.23 - botocore==1.37.23 - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - clickclick==20.10.2 - coverage==7.8.0 - dnspython==2.7.0 - exceptiongroup==1.2.2 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jmespath==1.0.1 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - pystache==0.6.8 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - raven==6.10.0 - requests==2.32.3 - responses==0.25.7 - s3transfer==0.11.4 - six==1.17.0 - stups-cli-support==1.1.22 - stups-pierone==1.1.56 - stups-tokens==1.1.19 - stups-zign==1.2 - tomli==2.2.1 - types-python-dateutil==2.9.0.20241206 - typing==3.7.4.3 - urllib3==1.26.20 - zipp==3.21.0 prefix: /opt/conda/envs/senza
[ "tests/test_elastigroup.py::test_missing_instance_type", "tests/test_elastigroup.py::test_extract_instance_types" ]
[]
[ "tests/test_elastigroup.py::test_component_elastigroup_defaults", "tests/test_elastigroup.py::test_missing_access_token", "tests/test_elastigroup.py::test_spotinst_account_resolution", "tests/test_elastigroup.py::test_spotinst_account_resolution_failure", "tests/test_elastigroup.py::test_block_mappings", "tests/test_elastigroup.py::test_auto_scaling_rules", "tests/test_elastigroup.py::test_detailed_monitoring", "tests/test_elastigroup.py::test_prediction_strategy", "tests/test_elastigroup.py::test_autoscaling_capacity", "tests/test_elastigroup.py::test_product", "tests/test_elastigroup.py::test_standard_tags", "tests/test_elastigroup.py::test_extract_subnets", "tests/test_elastigroup.py::test_load_balancers", "tests/test_elastigroup.py::test_public_ips", "tests/test_elastigroup.py::test_extract_image_id", "tests/test_elastigroup.py::test_extract_security_group_ids", "tests/test_elastigroup.py::test_extract_instance_profile" ]
[]
Apache License 2.0
2,779
[ "spotinst/components/elastigroup.py" ]
[ "spotinst/components/elastigroup.py" ]
fniessink__next-action-162
5d93327fa6f163d540a60103a5f829ca94e443f6
2018-07-14 19:57:03
46f06f9138ef0ca532c7e7ecb281fbbc85880d46
diff --git a/CHANGELOG.md b/CHANGELOG.md index 91e0439..2481f94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [Unreleased] - 2018-07-14 + +### Fixed + +- Allow for using `--config` when generating a configuration file with `--write-config-file` so it is possible to ignore the existing configuration file when generating a new one. Fixes #161. + ## [1.5.2] - 2018-07-07 ### Fixed diff --git a/README.md b/README.md index d78de09..ffb50ba 100644 --- a/README.md +++ b/README.md @@ -283,7 +283,7 @@ $ next-action --write-config-file file: ~/todo.txt number: 1 reference: multiple -style: native +style: default ``` To make this the configuration that *Next-action* reads by default, redirect the output to `~/.next-action.cfg` like this: `next-action --write-config-file > ~/.next-action.cfg`. @@ -448,9 +448,9 @@ To run the unit tests: ```console $ python -m unittest -........................................................................................................................................................................................................................................... +............................................................................................................................................................................................................................................ 
---------------------------------------------------------------------- -Ran 235 tests in 2.611s +Ran 236 tests in 2.931s OK ``` @@ -461,9 +461,9 @@ To create the unit test coverage report run the unit tests under coverage with: ```console $ coverage run --branch -m unittest -........................................................................................................................................................................................................................................... +............................................................................................................................................................................................................................................ ---------------------------------------------------------------------- -Ran 235 tests in 2.784s +Ran 236 tests in 3.557s OK ``` @@ -475,7 +475,7 @@ $ coverage report --fail-under=100 --omit=".venv/*" --skip-covered Name Stmts Miss Branch BrPart Cover ----------------------------------------- ----------------------------------------- -TOTAL 1341 0 173 0 100% +TOTAL 1347 0 173 0 100% 25 files skipped due to complete coverage. 
``` diff --git a/docs/update_readme.py b/docs/update_readme.py index acdba69..2ff52b6 100644 --- a/docs/update_readme.py +++ b/docs/update_readme.py @@ -10,9 +10,10 @@ import sys def do_command(line): """Run the command on the line and return its stdout and stderr.""" command = shlex.split(line[2:]) - if command[0] == "next-action" and "--write-config-file" not in command: + if command[0] == "next-action": command.insert(1, "--config") - command.insert(2, "docs/.next-action.cfg") + if "--write-config-file" not in command: + command.insert(2, "docs/.next-action.cfg") command_output = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout = command_output.stdout.strip() diff --git a/next_action/arguments/parser.py b/next_action/arguments/parser.py index 35c9f6a..3184c55 100644 --- a/next_action/arguments/parser.py +++ b/next_action/arguments/parser.py @@ -50,12 +50,11 @@ class NextActionArgumentParser(argparse.ArgumentParser): def add_configuration_options(self) -> None: """Add the configuration options to the parser.""" config_group = self.add_argument_group("Configuration options") - config_file = config_group.add_mutually_exclusive_group() - config_file.add_argument( + config_group.add_argument( "-c", "--config-file", metavar="<config.cfg>", type=str, default="~/.next-action.cfg", nargs="?", help="filename of configuration file to read (default: %(default)s); omit filename to not read any " "configuration file") - config_file.add_argument( + config_group.add_argument( "-w", "--write-config-file", help="generate a sample configuration file and exit", action="store_true") def add_input_options(self) -> None:
Next-action can't be told to not read the current configuration file when generating one ```console $ next-action -w -c Usage: next-action [-h] [--version] [-c [<config.cfg>] | -w] [-f <todo.txt> ...] [-r <ref>] [-s [<style>]] [-a | -n <number>] [-d [<due date>] | -o] [-p [<priority>]] [--] [<context|project> ...] next-action: error: argument -c/--config-file: not allowed with argument -w/--write-config-file ```
fniessink/next-action
diff --git a/tests/unittests/arguments/test_config.py b/tests/unittests/arguments/test_config.py index fadbf58..7eb47f0 100644 --- a/tests/unittests/arguments/test_config.py +++ b/tests/unittests/arguments/test_config.py @@ -161,6 +161,16 @@ class WriteConfigFileTest(ConfigTestCase): expected += "file: ~/todo.txt\nnumber: 3\nreference: multiple\nstyle: default\n" self.assertEqual([call(expected)], mock_stdout_write.call_args_list) + @patch.object(sys, "argv", ["next-action", "--write-config-file", "--config"]) + @patch.object(config, "open", mock_open(read_data="number: 3")) + @patch.object(sys.stdout, "write") + def test_ignore_config(self, mock_stdout_write): + """Test that the written config file does not contain the read config file.""" + self.assertRaises(SystemExit, parse_arguments) + expected = "# Configuration file for Next-action. Edit the settings below as you like.\n" + expected += "file: ~/todo.txt\nnumber: 1\nreference: multiple\nstyle: default\n" + self.assertEqual([call(expected)], mock_stdout_write.call_args_list) + class FilenameTest(ConfigTestCase): """Unit tests for the config file parameter."""
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 4 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "coverage", "mypy", "pylint", "pycodestyle", "pydocstyle", "pytest" ], "pre_install": [], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 Cerberus==1.2 coverage==7.8.0 dateparser==0.7.0 dill==0.3.9 exceptiongroup==1.2.2 iniconfig==2.1.0 isort==6.0.1 mccabe==0.7.0 mypy==1.15.0 mypy-extensions==1.0.0 -e git+https://github.com/fniessink/next-action.git@5d93327fa6f163d540a60103a5f829ca94e443f6#egg=next_action packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pydocstyle==6.3.0 Pygments==2.2.0 pylint==3.3.6 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==3.13 regex==2024.11.6 six==1.17.0 snowballstemmer==2.2.0 tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0 tzlocal==5.3.1
name: next-action channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - cerberus==1.2 - coverage==7.8.0 - dateparser==0.7.0 - dill==0.3.9 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - isort==6.0.1 - mccabe==0.7.0 - mypy==1.15.0 - mypy-extensions==1.0.0 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pydocstyle==6.3.0 - pygments==2.2.0 - pylint==3.3.6 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==3.13 - regex==2024.11.6 - six==1.17.0 - snowballstemmer==2.2.0 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 - tzlocal==5.3.1 prefix: /opt/conda/envs/next-action
[ "tests/unittests/arguments/test_config.py::WriteConfigFileTest::test_ignore_config" ]
[]
[ "tests/unittests/arguments/test_config.py::ReadConfigFileTest::test_empty_file", "tests/unittests/arguments/test_config.py::ReadConfigFileTest::test_error_opening", "tests/unittests/arguments/test_config.py::ReadConfigFileTest::test_error_parsing", "tests/unittests/arguments/test_config.py::ReadConfigFileTest::test_file_not_found", "tests/unittests/arguments/test_config.py::ReadConfigFileTest::test_invalid_document", "tests/unittests/arguments/test_config.py::ReadConfigFileTest::test_missing_default_config", "tests/unittests/arguments/test_config.py::ReadConfigFileTest::test_no_file_key", "tests/unittests/arguments/test_config.py::ReadConfigFileTest::test_skip_config", "tests/unittests/arguments/test_config.py::WriteConfigFileTest::test_default_file", "tests/unittests/arguments/test_config.py::WriteConfigFileTest::test_multiple_files", "tests/unittests/arguments/test_config.py::WriteConfigFileTest::test_priority", "tests/unittests/arguments/test_config.py::WriteConfigFileTest::test_read_config", "tests/unittests/arguments/test_config.py::WriteConfigFileTest::test_show_all", "tests/unittests/arguments/test_config.py::WriteConfigFileTest::test_with_args", "tests/unittests/arguments/test_config.py::FilenameTest::test_cli_takes_precedence", "tests/unittests/arguments/test_config.py::FilenameTest::test_invalid_filename", "tests/unittests/arguments/test_config.py::FilenameTest::test_valid_and_invalid", "tests/unittests/arguments/test_config.py::FilenameTest::test_valid_file", "tests/unittests/arguments/test_config.py::FilenameTest::test_valid_files", "tests/unittests/arguments/test_config.py::NumberTest::test_all_and_number", "tests/unittests/arguments/test_config.py::NumberTest::test_all_false", "tests/unittests/arguments/test_config.py::NumberTest::test_all_true", "tests/unittests/arguments/test_config.py::NumberTest::test_argument_all_overrides", "tests/unittests/arguments/test_config.py::NumberTest::test_argument_nr_overrides", 
"tests/unittests/arguments/test_config.py::NumberTest::test_cli_takes_precedence", "tests/unittests/arguments/test_config.py::NumberTest::test_invalid_number", "tests/unittests/arguments/test_config.py::NumberTest::test_valid_number", "tests/unittests/arguments/test_config.py::NumberTest::test_zero", "tests/unittests/arguments/test_config.py::ConfigStyleTest::test_cancel_style", "tests/unittests/arguments/test_config.py::ConfigStyleTest::test_invalid_style", "tests/unittests/arguments/test_config.py::ConfigStyleTest::test_override_style", "tests/unittests/arguments/test_config.py::ConfigStyleTest::test_valid_style", "tests/unittests/arguments/test_config.py::PriorityTest::test_cancel_priority", "tests/unittests/arguments/test_config.py::PriorityTest::test_invalid_priority", "tests/unittests/arguments/test_config.py::PriorityTest::test_override_priority", "tests/unittests/arguments/test_config.py::PriorityTest::test_override_short", "tests/unittests/arguments/test_config.py::PriorityTest::test_valid_priority", "tests/unittests/arguments/test_config.py::ReferenceTest::test_invalid_priority", "tests/unittests/arguments/test_config.py::ReferenceTest::test_override", "tests/unittests/arguments/test_config.py::ReferenceTest::test_valid_reference", "tests/unittests/arguments/test_config.py::FiltersTest::test_context", "tests/unittests/arguments/test_config.py::FiltersTest::test_context_list", "tests/unittests/arguments/test_config.py::FiltersTest::test_contexts", "tests/unittests/arguments/test_config.py::FiltersTest::test_excluded_context", "tests/unittests/arguments/test_config.py::FiltersTest::test_excluded_project", "tests/unittests/arguments/test_config.py::FiltersTest::test_ignore_project_not_context", "tests/unittests/arguments/test_config.py::FiltersTest::test_invalid_filter_list", "tests/unittests/arguments/test_config.py::FiltersTest::test_invalid_filter_string", "tests/unittests/arguments/test_config.py::FiltersTest::test_inverse_project", 
"tests/unittests/arguments/test_config.py::FiltersTest::test_project", "tests/unittests/arguments/test_config.py::FiltersTest::test_projects", "tests/unittests/arguments/test_config.py::FiltersTest::test_same_project" ]
[]
Apache License 2.0
2,780
[ "docs/update_readme.py", "next_action/arguments/parser.py", "README.md", "CHANGELOG.md" ]
[ "docs/update_readme.py", "next_action/arguments/parser.py", "README.md", "CHANGELOG.md" ]
dask__dask-3767
06248f39d44d45ff375ee364a1cfa593c161c990
2018-07-14 21:52:11
b8816eb498bfe4a24ace89484b2df2af3d181bfe
diff --git a/.gitignore b/.gitignore index 7fc3d3c76..f1f69b4c5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.hypothesis *.py[cod] __pycache__/ *.egg-info diff --git a/dask/array/core.py b/dask/array/core.py index 384bc1fdc..a8e11b807 100644 --- a/dask/array/core.py +++ b/dask/array/core.py @@ -1337,6 +1337,7 @@ class Array(DaskMethodsMixin): self.dtype = y.dtype self.dask = y.dask self.name = y.name + self._chunks = y.chunks return self else: raise NotImplementedError("Item assignment with %s not supported" diff --git a/dask/array/random.py b/dask/array/random.py index 5d5a6bb0b..8f299e7d0 100644 --- a/dask/array/random.py +++ b/dask/array/random.py @@ -217,6 +217,11 @@ class RandomState(object): size = (size,) chunks = normalize_chunks(chunks, size, dtype=np.float64) + if not replace and len(chunks[0]) > 1: + err_msg = ('replace=False is not currently supported for ' + 'dask.array.choice with multi-chunk output ' + 'arrays') + raise NotImplementedError(err_msg) sizes = list(product(*chunks)) state_data = random_state_data(len(sizes), self._numpy_state) diff --git a/docs/README.rst b/docs/README.rst index bb5afdb9d..932b24eb2 100644 --- a/docs/README.rst +++ b/docs/README.rst @@ -5,6 +5,6 @@ these commands suffice:: git clone [email protected]:dask/dask.git cd dask/docs conda create -n daskdocs --file requirements-docs.txt - source activate daskdocs + conda activate daskdocs make html open build/html/index.html diff --git a/docs/source/array-api.rst b/docs/source/array-api.rst index 4abb29387..73e4da786 100644 --- a/docs/source/array-api.rst +++ b/docs/source/array-api.rst @@ -338,7 +338,7 @@ Create and Store Arrays Generalized Ufuncs ~~~~~~~~~~~~~~~~~~ -.. currentmodule:: dask.array +.. currentmodule:: dask.array.gufunc .. autosummary:: apply_gufunc
Dask array setitem with differently chunked data provides wrong shape ```python >>> import dask.array as da >>> x = da.zeros(5, chunks=2) >>> m = da.zeros(5, chunks=3) >>> x[m>0] = 1 >>> print x.shape, x.compute().shape (5,) (4,) ``` This should probably unifiy chunks before slicing
dask/dask
diff --git a/dask/array/tests/test_random.py b/dask/array/tests/test_random.py index 4624abf8e..5d270419c 100644 --- a/dask/array/tests/test_random.py +++ b/dask/array/tests/test_random.py @@ -268,6 +268,14 @@ def test_choice(): with pytest.raises(ValueError): da.random.choice(a, size=size, chunks=chunks, p=p) + with pytest.raises(NotImplementedError): + da.random.choice(da_a, size=size, chunks=chunks, replace=False) + + # Want to make sure replace=False works for a single-partition output array + x = da.random.choice(da_a, size=da_a.shape[0], chunks=-1, replace=False) + res = x.compute() + assert len(res) == len(np.unique(res)) + def test_create_with_auto_dimensions(): with dask.config.set({'array.chunk-size': '128MiB'}): diff --git a/dask/array/tests/test_slicing.py b/dask/array/tests/test_slicing.py index 6ed1a3606..5fc3c79d2 100644 --- a/dask/array/tests/test_slicing.py +++ b/dask/array/tests/test_slicing.py @@ -798,3 +798,19 @@ def test_pathological_unsorted_slicing(): assert '10' in str(info.list[0]) assert 'out-of-order' in str(info.list[0]) + + [email protected]('params', [(2, 2, 1), (5, 3, 2)]) +def test_setitem_with_different_chunks_preserves_shape(params): + """ Reproducer for https://github.com/dask/dask/issues/3730. + + Mutating based on an array with different chunks can cause new chunks to be + used. We need to ensure those new chunk sizes are applied to the mutated + array, otherwise the array won't generate the correct keys. 
+ """ + array_size, chunk_size1, chunk_size2 = params + x = da.zeros(array_size, chunks=chunk_size1) + mask = da.zeros(array_size, chunks=chunk_size2) + x[mask] = 1 + result = x.compute() + assert x.shape == result.shape diff --git a/dask/dataframe/io/tests/test_hdf.py b/dask/dataframe/io/tests/test_hdf.py index 7a316a6ce..9252ee399 100644 --- a/dask/dataframe/io/tests/test_hdf.py +++ b/dask/dataframe/io/tests/test_hdf.py @@ -535,6 +535,7 @@ def test_read_hdf_doesnt_segfault(): def test_hdf_filenames(): + pytest.importorskip('tables') df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'], 'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.]) ddf = dd.from_pandas(df, npartitions=2)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 5 }
0.18
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[complete]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 click==8.0.4 cloudpickle==2.2.1 -e git+https://github.com/dask/dask.git@06248f39d44d45ff375ee364a1cfa593c161c990#egg=dask distributed==1.28.1 HeapDict==1.0.1 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work locket==1.0.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work msgpack==1.0.5 numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pandas==1.1.5 partd==1.2.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==7.0.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 six==1.17.0 sortedcontainers==2.4.0 tblib==1.7.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work toolz==0.12.0 tornado==6.1 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zict==2.1.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: dask channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.0.4 - cloudpickle==2.2.1 - distributed==1.28.1 - heapdict==1.0.1 - locket==1.0.0 - msgpack==1.0.5 - numpy==1.19.5 - pandas==1.1.5 - partd==1.2.0 - psutil==7.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - six==1.17.0 - sortedcontainers==2.4.0 - tblib==1.7.0 - toolz==0.12.0 - tornado==6.1 - zict==2.1.0 prefix: /opt/conda/envs/dask
[ "dask/array/tests/test_random.py::test_choice", "dask/array/tests/test_slicing.py::test_setitem_with_different_chunks_preserves_shape[params0]", "dask/array/tests/test_slicing.py::test_setitem_with_different_chunks_preserves_shape[params1]" ]
[]
[ "dask/array/tests/test_random.py::test_RandomState", "dask/array/tests/test_random.py::test_concurrency", "dask/array/tests/test_random.py::test_doc_randomstate", "dask/array/tests/test_random.py::test_serializability", "dask/array/tests/test_random.py::test_determinisim_through_dask_values", "dask/array/tests/test_random.py::test_randomstate_consistent_names", "dask/array/tests/test_random.py::test_random", "dask/array/tests/test_random.py::test_parametrized_random_function", "dask/array/tests/test_random.py::test_kwargs", "dask/array/tests/test_random.py::test_unique_names", "dask/array/tests/test_random.py::test_docs", "dask/array/tests/test_random.py::test_can_make_really_big_random_array", "dask/array/tests/test_random.py::test_random_seed", "dask/array/tests/test_random.py::test_consistent_across_sizes", "dask/array/tests/test_random.py::test_random_all", "dask/array/tests/test_random.py::test_array_broadcasting", "dask/array/tests/test_random.py::test_multinomial", "dask/array/tests/test_random.py::test_create_with_auto_dimensions", "dask/array/tests/test_random.py::test_names", "dask/array/tests/test_slicing.py::test_slice_1d", "dask/array/tests/test_slicing.py::test_slice_singleton_value_on_boundary", "dask/array/tests/test_slicing.py::test_slice_array_1d", "dask/array/tests/test_slicing.py::test_slice_array_2d", "dask/array/tests/test_slicing.py::test_slice_optimizations", "dask/array/tests/test_slicing.py::test_slicing_with_singleton_indices", "dask/array/tests/test_slicing.py::test_slicing_with_newaxis", "dask/array/tests/test_slicing.py::test_take", "dask/array/tests/test_slicing.py::test_take_sorted", "dask/array/tests/test_slicing.py::test_slicing_chunks", "dask/array/tests/test_slicing.py::test_slicing_with_numpy_arrays", "dask/array/tests/test_slicing.py::test_slicing_and_chunks", "dask/array/tests/test_slicing.py::test_slicing_identities", "dask/array/tests/test_slicing.py::test_slice_stop_0", 
"dask/array/tests/test_slicing.py::test_slice_list_then_None", "dask/array/tests/test_slicing.py::test_slicing_with_negative_step_flops_keys", "dask/array/tests/test_slicing.py::test_empty_slice", "dask/array/tests/test_slicing.py::test_multiple_list_slicing", "dask/array/tests/test_slicing.py::test_boolean_list_slicing", "dask/array/tests/test_slicing.py::test_boolean_numpy_array_slicing", "dask/array/tests/test_slicing.py::test_empty_list", "dask/array/tests/test_slicing.py::test_uneven_chunks", "dask/array/tests/test_slicing.py::test_new_blockdim", "dask/array/tests/test_slicing.py::test_slicing_consistent_names", "dask/array/tests/test_slicing.py::test_slicing_consistent_names_after_normalization", "dask/array/tests/test_slicing.py::test_sanitize_index_element", "dask/array/tests/test_slicing.py::test_sanitize_index", "dask/array/tests/test_slicing.py::test_uneven_blockdims", "dask/array/tests/test_slicing.py::test_oob_check", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[None-None]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[None-3]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[None-2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[None-1]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks1-None]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks1-3]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks1-2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks1-1]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks2-None]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks2-3]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks2-2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks2-1]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks3-None]", 
"dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks3-3]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks3-2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks3-1]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks4-None]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks4-3]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks4-2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array[x_chunks4-1]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_0d[1]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_0d[2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_0d[3]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_nanchunks[1]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_nanchunks[2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_nanchunks[3]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_nanchunks[4]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_nanchunks[5]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_negindex[2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_negindex[4]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_indexerror[2]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_indexerror[4]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_dtypes[int8]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_dtypes[int16]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_dtypes[int32]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_dtypes[int64]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_dtypes[uint8]", 
"dask/array/tests/test_slicing.py::test_index_with_int_dask_array_dtypes[uint16]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_dtypes[uint32]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_dtypes[uint64]", "dask/array/tests/test_slicing.py::test_index_with_int_dask_array_nocompute", "dask/array/tests/test_slicing.py::test_index_with_bool_dask_array", "dask/array/tests/test_slicing.py::test_index_with_bool_dask_array_2", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index0-shape0]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index0-shape1]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index0-shape2]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index1-shape0]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index1-shape1]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index1-shape2]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index2-shape0]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index2-shape1]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index2-shape2]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index3-shape0]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index3-shape1]", "dask/array/tests/test_slicing.py::test_slicing_with_Nones[index3-shape2]", "dask/array/tests/test_slicing.py::test_slicing_integer_no_warnings", "dask/array/tests/test_slicing.py::test_None_overlap_int", "dask/array/tests/test_slicing.py::test_negative_n_slicing", "dask/array/tests/test_slicing.py::test_negative_list_slicing", "dask/array/tests/test_slicing.py::test_permit_oob_slices", "dask/array/tests/test_slicing.py::test_normalize_index", "dask/array/tests/test_slicing.py::test_take_semi_sorted", "dask/array/tests/test_slicing.py::test_slicing_plan[chunks0-index0-expected0]", "dask/array/tests/test_slicing.py::test_slicing_plan[chunks1-index1-expected1]", 
"dask/array/tests/test_slicing.py::test_slicing_plan[chunks2-index2-expected2]", "dask/array/tests/test_slicing.py::test_pathological_unsorted_slicing" ]
[]
BSD 3-Clause "New" or "Revised" License
2,781
[ "docs/README.rst", "docs/source/array-api.rst", "dask/array/random.py", ".gitignore", "dask/array/core.py" ]
[ "docs/README.rst", "docs/source/array-api.rst", "dask/array/random.py", ".gitignore", "dask/array/core.py" ]
hylang__hy-1661
5bfd4592f6c5df063afbcd9c67ea40877f4ed52e
2018-07-15 19:21:37
e2d6640e8cd4eee4a92876bb4d232d30c2d66d2f
diff --git a/NEWS.rst b/NEWS.rst index d4255efa..2ec6b165 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -13,6 +13,11 @@ New Features shorthand for `(get obj :key)`, and they accept a default value as a second argument. +Bug Fixes +------------------------------ +* Fixed bugs in the handling of unpacking forms in method calls and + attribute access. + 0.15.0 ============================== diff --git a/docs/language/api.rst b/docs/language/api.rst index cb048a35..ab0c0418 100644 --- a/docs/language/api.rst +++ b/docs/language/api.rst @@ -458,7 +458,7 @@ arguments constitute the body of the function. (defn name [params] bodyform1 bodyform2...) If there at least two body forms, and the first of them is a string literal, -this string becomes the :ref:`docstring <py:docstring>` of the function. +this string becomes the :term:`py:docstring` of the function. Parameters may be prefixed with the following special symbols. If you use more than one, they can only appear in the given order (so all `&optional` diff --git a/hy/compiler.py b/hy/compiler.py index 4f4f694e..7dee28c8 100755 --- a/hy/compiler.py +++ b/hy/compiler.py @@ -6,8 +6,8 @@ from hy.models import (HyObject, HyExpression, HyKeyword, HyInteger, HyComplex, HyString, HyBytes, HySymbol, HyFloat, HyList, HySet, HyDict, HySequence, wrap_value) -from hy.model_patterns import (FORM, SYM, STR, sym, brackets, whole, notpexpr, - dolike, pexpr, times, Tag, tag) +from hy.model_patterns import (FORM, SYM, KEYWORD, STR, sym, brackets, whole, + notpexpr, dolike, pexpr, times, Tag, tag, unpack) from funcparserlib.parser import some, many, oneplus, maybe, NoParseError from hy.errors import HyCompileError, HyTypeError @@ -1539,98 +1539,97 @@ class HyASTCompiler(object): else Result()) @builds_model(HyExpression) - def compile_expression(self, expression): + def compile_expression(self, expr): # Perform macro expansions - expression = macroexpand(expression, self) - if not isinstance(expression, HyExpression): + expr = macroexpand(expr, 
self) + if not isinstance(expr, HyExpression): # Go through compile again if the type changed. - return self.compile(expression) + return self.compile(expr) - if not expression: + if not expr: raise HyTypeError( - expression, "empty expressions are not allowed at top level") + expr, "empty expressions are not allowed at top level") - fn = expression[0] + args = list(expr) + root = args.pop(0) func = None - if isinstance(fn, HySymbol): + if isinstance(root, HySymbol): - # First check if `fn` is a special operator, unless it has an + # First check if `root` is a special operator, unless it has an # `unpack-iterable` in it, since Python's operators (`+`, # etc.) can't unpack. An exception to this exception is that - # tuple literals (`,`) can unpack. - sfn = ast_str(fn) - if (sfn in _special_form_compilers or sfn in _bad_roots) and ( - sfn == mangle(",") or - not any(is_unpack("iterable", x) for x in expression[1:])): - if sfn in _bad_roots: + # tuple literals (`,`) can unpack. Finally, we allow unpacking in + # `.` forms here so the user gets a better error message. + sroot = ast_str(root) + if (sroot in _special_form_compilers or sroot in _bad_roots) and ( + sroot in (mangle(","), mangle(".")) or + not any(is_unpack("iterable", x) for x in args)): + if sroot in _bad_roots: raise HyTypeError( - expression, - "The special form '{}' is not allowed here".format(fn)) - # `sfn` is a special operator. Get the build method and + expr, + "The special form '{}' is not allowed here".format(root)) + # `sroot` is a special operator. Get the build method and # pattern-match the arguments. 
- build_method, pattern = _special_form_compilers[sfn] + build_method, pattern = _special_form_compilers[sroot] try: - parse_tree = pattern.parse(expression[1:]) + parse_tree = pattern.parse(args) except NoParseError as e: raise HyTypeError( - expression[min(e.state.pos + 1, len(expression) - 1)], + expr[min(e.state.pos + 1, len(expr) - 1)], "parse error for special form '{}': {}".format( - expression[0], + root, e.msg.replace("<EOF>", "end of form"))) return Result() + build_method( - self, expression, unmangle(sfn), *parse_tree) + self, expr, unmangle(sroot), *parse_tree) - if fn.startswith("."): + if root.startswith("."): # (.split "test test") -> "test test".split() - # (.a.b.c x) -> (.c (. x a b)) -> x.a.b.c() + # (.a.b.c x v1 v2) -> (.c (. x a b) v1 v2) -> x.a.b.c(v1, v2) # Get the method name (the last named attribute # in the chain of attributes) - attrs = [HySymbol(a).replace(fn) for a in fn.split(".")[1:]] - fn = attrs.pop() + attrs = [HySymbol(a).replace(root) for a in root.split(".")[1:]] + root = attrs.pop() # Get the object we're calling the method on # (extracted with the attribute access DSL) - i = 1 - if len(expression) != 2: - # If the expression has only one object, - # always use that as the callee. - # Otherwise, hunt for the first thing that - # isn't a keyword argument or its value. - while i < len(expression): - if isinstance(expression[i], HyKeyword): - # Skip the keyword argument and its value. - i += 1 - else: - # Use expression[i]. - break - i += 1 - else: - raise HyTypeError(expression, - "attribute access requires object") + # Skip past keywords and their arguments. + try: + kws, obj, rest = ( + many(KEYWORD + FORM | unpack("mapping")) + + FORM + + many(FORM)).parse(args) + except NoParseError: + raise HyTypeError( + expr, "attribute access requires object") + # Reconstruct `args` to exclude `obj`. 
+ args = [x for p in kws for x in p] + list(rest) + if is_unpack("iterable", obj): + raise HyTypeError( + obj, "can't call a method on an unpacking form") func = self.compile(HyExpression( - [HySymbol(".").replace(fn), expression.pop(i)] + + [HySymbol(".").replace(root), obj] + attrs)) # And get the method - func += asty.Attribute(fn, + func += asty.Attribute(root, value=func.force_expr, - attr=ast_str(fn), + attr=ast_str(root), ctx=ast.Load()) if not func: - func = self.compile(fn) + func = self.compile(root) # An exception for pulling together keyword args is if we're doing # a typecheck, eg (type :foo) - with_kwargs = fn not in ( + with_kwargs = root not in ( "type", "HyKeyword", "keyword", "name", "keyword?", "identity") args, ret, keywords, oldpy_star, oldpy_kw = self._compile_collect( - expression[1:], with_kwargs, oldpy_unpack=True) + args, with_kwargs, oldpy_unpack=True) return func + ret + asty.Call( - expression, func=func.expr, args=args, keywords=keywords, + expr, func=func.expr, args=args, keywords=keywords, starargs=oldpy_star, kwargs=oldpy_kw) @builds_model(HyInteger, HyFloat, HyComplex) diff --git a/hy/model_patterns.py b/hy/model_patterns.py index 1d30ccfa..52917683 100644 --- a/hy/model_patterns.py +++ b/hy/model_patterns.py @@ -15,6 +15,7 @@ from math import isinf FORM = some(lambda _: True) SYM = some(lambda x: isinstance(x, HySymbol)) +KEYWORD = some(lambda x: isinstance(x, HyKeyword)) STR = some(lambda x: isinstance(x, HyString)) def sym(wanted): @@ -57,6 +58,14 @@ def notpexpr(*disallowed_heads): isinstance(x[0], HySymbol) and x[0] in disallowed_heads)) +def unpack(kind): + "Parse an unpacking form, returning it unchanged." + return some(lambda x: + isinstance(x, HyExpression) + and len(x) > 0 + and isinstance(x[0], HySymbol) + and x[0] == "unpack-" + kind) + def times(lo, hi, parser): """Parse `parser` several times (`lo` to `hi`) in a row. `hi` can be float('inf'). The result is a list no matter the number of instances."""
`(.foo #* bar)` raises a compile-time RecursionError Maybe we'll decide not to implement this construct. Even then, we should avoid the infinite recursion.
hylang/hy
diff --git a/tests/compilers/test_ast.py b/tests/compilers/test_ast.py index a5f42af1..322f78c4 100644 --- a/tests/compilers/test_ast.py +++ b/tests/compilers/test_ast.py @@ -16,7 +16,6 @@ from hy._compat import PY3 import ast import pytest - def _ast_spotcheck(arg, root, secondary): if "." in arg: local, full = arg.split(".", 1) @@ -73,6 +72,17 @@ def test_empty_expr(): can_compile("(print '())") +def test_dot_unpacking(): + + can_compile("(.meth obj #* args az)") + cant_compile("(.meth #* args az)") + cant_compile("(. foo #* bar baz)") + + can_compile("(.meth obj #** args az)") + can_compile("(.meth #** args obj)") + cant_compile("(. foo #** bar baz)") + + def test_ast_bad_if(): "Make sure AST can't compile invalid if*" cant_compile("(if*)") @@ -290,7 +300,7 @@ def test_ast_require(): def test_ast_import_require_dotted(): - """As in Python, it should be a compile-type error to attempt to + """As in Python, it should be a compile-time error to attempt to import a dotted name.""" cant_compile("(import [spam [foo.bar]])") cant_compile("(require [spam [foo.bar]])")
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 4 }
0.15
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 appdirs==1.4.4 args==0.1.0 astor==0.8.1 Babel==2.14.0 certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 clint==0.5.1 coverage==7.2.7 distlib==0.3.9 docutils==0.17.1 exceptiongroup==1.2.2 filelock==3.12.2 flake8==5.0.4 funcparserlib==1.0.1 -e git+https://github.com/hylang/hy.git@5bfd4592f6c5df063afbcd9c67ea40877f4ed52e#egg=hy idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 iniconfig==2.0.0 Jinja2==3.1.6 MarkupSafe==2.1.5 mccabe==0.7.0 packaging==24.0 platformdirs==2.6.2 pluggy==1.2.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.17.2 pytest==7.4.4 pytz==2025.2 requests==2.31.0 rply==0.7.8 six==1.17.0 snowballstemmer==2.2.0 Sphinx==4.3.2 sphinx-rtd-theme==1.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==2.0.1 tox==3.28.0 typing_extensions==4.7.1 urllib3==2.0.7 virtualenv==20.16.2 zipp==3.15.0
name: hy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - appdirs==1.4.4 - args==0.1.0 - astor==0.8.1 - babel==2.14.0 - charset-normalizer==3.4.1 - clint==0.5.1 - coverage==7.2.7 - distlib==0.3.9 - docutils==0.17.1 - exceptiongroup==1.2.2 - filelock==3.12.2 - flake8==5.0.4 - funcparserlib==1.0.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - iniconfig==2.0.0 - jinja2==3.1.6 - markupsafe==2.1.5 - mccabe==0.7.0 - packaging==24.0 - platformdirs==2.6.2 - pluggy==1.2.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.17.2 - pytest==7.4.4 - pytz==2025.2 - requests==2.31.0 - rply==0.7.8 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==4.3.2 - sphinx-rtd-theme==1.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==2.0.1 - tox==3.28.0 - typing-extensions==4.7.1 - urllib3==2.0.7 - virtualenv==20.16.2 - zipp==3.15.0 prefix: /opt/conda/envs/hy
[ "tests/compilers/test_ast.py::test_dot_unpacking" ]
[]
[ "tests/compilers/test_ast.py::test_ast_bad_type", "tests/compilers/test_ast.py::test_empty_expr", "tests/compilers/test_ast.py::test_ast_bad_if", "tests/compilers/test_ast.py::test_ast_valid_if", "tests/compilers/test_ast.py::test_ast_valid_unary_op", "tests/compilers/test_ast.py::test_ast_invalid_unary_op", "tests/compilers/test_ast.py::test_ast_bad_while", "tests/compilers/test_ast.py::test_ast_good_do", "tests/compilers/test_ast.py::test_ast_good_raise", "tests/compilers/test_ast.py::test_ast_raise_from", "tests/compilers/test_ast.py::test_ast_bad_raise", "tests/compilers/test_ast.py::test_ast_good_try", "tests/compilers/test_ast.py::test_ast_bad_try", "tests/compilers/test_ast.py::test_ast_good_except", "tests/compilers/test_ast.py::test_ast_bad_except", "tests/compilers/test_ast.py::test_ast_good_assert", "tests/compilers/test_ast.py::test_ast_bad_assert", "tests/compilers/test_ast.py::test_ast_good_global", "tests/compilers/test_ast.py::test_ast_bad_global", "tests/compilers/test_ast.py::test_ast_good_nonlocal", "tests/compilers/test_ast.py::test_ast_bad_nonlocal", "tests/compilers/test_ast.py::test_ast_good_defclass", "tests/compilers/test_ast.py::test_ast_good_defclass_with_metaclass", "tests/compilers/test_ast.py::test_ast_bad_defclass", "tests/compilers/test_ast.py::test_ast_good_lambda", "tests/compilers/test_ast.py::test_ast_bad_lambda", "tests/compilers/test_ast.py::test_ast_good_yield", "tests/compilers/test_ast.py::test_ast_bad_yield", "tests/compilers/test_ast.py::test_ast_import_mangle_dotted", "tests/compilers/test_ast.py::test_ast_good_import_from", "tests/compilers/test_ast.py::test_ast_require", "tests/compilers/test_ast.py::test_ast_import_require_dotted", "tests/compilers/test_ast.py::test_ast_no_pointless_imports", "tests/compilers/test_ast.py::test_ast_good_get", "tests/compilers/test_ast.py::test_ast_bad_get", "tests/compilers/test_ast.py::test_ast_good_cut", "tests/compilers/test_ast.py::test_ast_bad_cut", 
"tests/compilers/test_ast.py::test_ast_good_take", "tests/compilers/test_ast.py::test_ast_good_drop", "tests/compilers/test_ast.py::test_ast_good_assoc", "tests/compilers/test_ast.py::test_ast_bad_assoc", "tests/compilers/test_ast.py::test_ast_bad_with", "tests/compilers/test_ast.py::test_ast_valid_while", "tests/compilers/test_ast.py::test_ast_valid_for", "tests/compilers/test_ast.py::test_nullary_break_continue", "tests/compilers/test_ast.py::test_ast_expression_basics", "tests/compilers/test_ast.py::test_ast_anon_fns_basics", "tests/compilers/test_ast.py::test_ast_non_decoratable", "tests/compilers/test_ast.py::test_ast_lambda_lists", "tests/compilers/test_ast.py::test_ast_print", "tests/compilers/test_ast.py::test_ast_tuple", "tests/compilers/test_ast.py::test_lambda_list_keywords_rest", "tests/compilers/test_ast.py::test_lambda_list_keywords_kwargs", "tests/compilers/test_ast.py::test_lambda_list_keywords_kwonly", "tests/compilers/test_ast.py::test_lambda_list_keywords_mixed", "tests/compilers/test_ast.py::test_missing_keyword_argument_value", "tests/compilers/test_ast.py::test_ast_unicode_strings", "tests/compilers/test_ast.py::test_ast_unicode_vs_bytes", "tests/compilers/test_ast.py::test_ast_bracket_string", "tests/compilers/test_ast.py::test_compile_error", "tests/compilers/test_ast.py::test_for_compile_error", "tests/compilers/test_ast.py::test_attribute_access", "tests/compilers/test_ast.py::test_attribute_empty", "tests/compilers/test_ast.py::test_bad_setv", "tests/compilers/test_ast.py::test_defn", "tests/compilers/test_ast.py::test_setv_builtins", "tests/compilers/test_ast.py::test_top_level_unquote", "tests/compilers/test_ast.py::test_lots_of_comment_lines", "tests/compilers/test_ast.py::test_exec_star", "tests/compilers/test_ast.py::test_compiler_macro_tag_try", "tests/compilers/test_ast.py::test_ast_good_yield_from", "tests/compilers/test_ast.py::test_ast_bad_yield_from", "tests/compilers/test_ast.py::test_eval_generator_with_return" ]
[]
MIT License
2,784
[ "hy/compiler.py", "NEWS.rst", "docs/language/api.rst", "hy/model_patterns.py" ]
[ "hy/compiler.py", "NEWS.rst", "docs/language/api.rst", "hy/model_patterns.py" ]
oasis-open__cti-pattern-validator-50
801c2364013d3cc5529f5e0b967def7f505a91e4
2018-07-16 16:55:15
801c2364013d3cc5529f5e0b967def7f505a91e4
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8864e8e..702b1e0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,6 +2,7 @@ sha: ea227f024bd89d638aea319c92806737e3375979 hooks: - id: trailing-whitespace + exclude: stix2patterns/grammars/* - id: flake8 args: - --ignore=F403,F405 diff --git a/stix2patterns/grammars/STIXPatternLexer.py b/stix2patterns/grammars/STIXPatternLexer.py index 8b0ddd1..a6a1f51 100644 --- a/stix2patterns/grammars/STIXPatternLexer.py +++ b/stix2patterns/grammars/STIXPatternLexer.py @@ -9,7 +9,7 @@ import sys def serializedATN(): with StringIO() as buf: buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2") - buf.write(u"\67\u0257\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6") + buf.write(u"\67\u01ef\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6") buf.write(u"\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4") buf.write(u"\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t") buf.write(u"\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27") @@ -18,277 +18,227 @@ def serializedATN(): buf.write(u"#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4") buf.write(u",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62") buf.write(u"\4\63\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67\t\67\4") - buf.write(u"8\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@") - buf.write(u"\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\t") - buf.write(u"I\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R") - buf.write(u"\tR\4S\tS\3\2\3\2\3\2\3\2\7\2\u00ac\n\2\f\2\16\2\u00af") - buf.write(u"\13\2\5\2\u00b1\n\2\3\3\5\3\u00b4\n\3\3\3\3\3\3\3\7\3") - buf.write(u"\u00b9\n\3\f\3\16\3\u00bc\13\3\5\3\u00be\n\3\3\4\3\4") - buf.write(u"\7\4\u00c2\n\4\f\4\16\4\u00c5\13\4\3\4\3\4\6\4\u00c9") - buf.write(u"\n\4\r\4\16\4\u00ca\3\5\5\5\u00ce\n\5\3\5\7\5\u00d1\n") - buf.write(u"\5\f\5\16\5\u00d4\13\5\3\5\3\5\6\5\u00d8\n\5\r\5\16\5") - buf.write(u"\u00d9\3\6\3\6\3\6\7\6\u00df\n\6\f\6\16\6\u00e2\13\6") - 
buf.write(u"\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\7\7\u00ed\n\7\f") - buf.write(u"\7\16\7\u00f0\13\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3") - buf.write(u"\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u0103\n\7\3\7") - buf.write(u"\3\7\3\b\3\b\3\b\3\b\3\b\3\b\7\b\u010d\n\b\f\b\16\b\u0110") - buf.write(u"\13\b\3\b\3\b\3\t\3\t\5\t\u0116\n\t\3\n\3\n\3\n\3\n\3") - buf.write(u"\n\3\n\3\n\3\n\3\n\3\n\3\n\5\n\u0123\n\n\3\n\3\n\3\n") - buf.write(u"\3\n\3\n\3\n\3\n\5\n\u012c\n\n\3\n\3\n\3\n\3\n\3\n\5") - buf.write(u"\n\u0133\n\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\5\n\u013d") - buf.write(u"\n\n\3\n\3\n\6\n\u0141\n\n\r\n\16\n\u0142\5\n\u0145\n") - buf.write(u"\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\r\3") - buf.write(u"\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3") - buf.write(u"\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20") - buf.write(u"\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3") - buf.write(u"\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22") - buf.write(u"\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\24\3") - buf.write(u"\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26") - buf.write(u"\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3") - buf.write(u"\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31") - buf.write(u"\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3") - buf.write(u"\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34") - buf.write(u"\3\35\3\35\7\35\u01be\n\35\f\35\16\35\u01c1\13\35\3\36") - buf.write(u"\3\36\7\36\u01c5\n\36\f\36\16\36\u01c8\13\36\3\37\3\37") - buf.write(u"\3\37\5\37\u01cd\n\37\3 \3 \3 \3 \5 \u01d3\n \3!\3!\3") - buf.write(u"\"\3\"\3\"\3#\3#\3$\3$\3$\3%\3%\3&\3&\3\'\3\'\3(\3(\3") - buf.write(u")\3)\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3/\3/\3\60\3\60\3") - buf.write(u"\61\3\61\3\62\3\62\3\63\3\63\3\64\3\64\3\65\3\65\3\66") - buf.write(u"\3\66\3\67\3\67\38\38\39\39\3:\3:\3;\3;\3<\3<\3=\3=\3") - buf.write(u">\3>\3?\3?\3@\3@\3A\3A\3B\3B\3C\3C\3D\3D\3E\3E\3F\3F") - 
buf.write(u"\3G\3G\3H\3H\3I\3I\3J\3J\3K\3K\3L\3L\3M\3M\3N\3N\3N\3") - buf.write(u"O\3O\3P\6P\u0237\nP\rP\16P\u0238\3P\3P\3Q\3Q\3Q\3Q\7") - buf.write(u"Q\u0241\nQ\fQ\16Q\u0244\13Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3") - buf.write(u"R\7R\u024f\nR\fR\16R\u0252\13R\3R\3R\3S\3S\3\u0242\2") - buf.write(u"T\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r") - buf.write(u"\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30") - buf.write(u"/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K") - buf.write(u"\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\2g\2i\2k\2m\2o\2") - buf.write(u"q\2s\2u\2w\2y\2{\2}\2\177\2\u0081\2\u0083\2\u0085\2\u0087") - buf.write(u"\2\u0089\2\u008b\2\u008d\2\u008f\2\u0091\2\u0093\2\u0095") - buf.write(u"\2\u0097\2\u0099\2\u009b\2\u009d\2\u009f\64\u00a1\65") - buf.write(u"\u00a3\66\u00a5\67\3\2+\3\2\63;\3\2\62;\4\2))^^\3\2\62") - buf.write(u"\64\3\2\63\64\3\2\62\63\3\2\62\65\3\2\62\67\5\2C\\aa") - buf.write(u"c|\6\2\62;C\\aac|\7\2//\62;C\\aac|\4\2CCcc\4\2DDdd\4") - buf.write(u"\2EEee\4\2FFff\4\2GGgg\4\2HHhh\4\2IIii\4\2JJjj\4\2KK") - buf.write(u"kk\4\2LLll\4\2MMmm\4\2NNnn\4\2OOoo\4\2PPpp\4\2QQqq\4") - buf.write(u"\2RRrr\4\2SSss\4\2TTtt\4\2UUuu\4\2VVvv\4\2WWww\4\2XX") - buf.write(u"xx\4\2YYyy\4\2ZZzz\4\2[[{{\4\2\\\\||\5\2\62;CHch\6\2") - buf.write(u"--\61;C\\c|\f\2\13\17\"\"\u0087\u0087\u00a2\u00a2\u1682") - buf.write(u"\u1682\u2002\u200c\u202a\u202b\u2031\u2031\u2061\u2061") - buf.write(u"\u3002\u3002\4\2\f\f\17\17\2\u0259\2\3\3\2\2\2\2\5\3") - buf.write(u"\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2") - buf.write(u"\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2") - buf.write(u"\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2") - buf.write(u"\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'") - buf.write(u"\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2") - buf.write(u"\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2") - buf.write(u"\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2") - 
buf.write(u"\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2") - buf.write(u"\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3") - buf.write(u"\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2") - buf.write(u"_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2\u009f\3\2\2\2\2\u00a1") - buf.write(u"\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\3\u00a7\3\2") - buf.write(u"\2\2\5\u00b3\3\2\2\2\7\u00bf\3\2\2\2\t\u00cd\3\2\2\2") - buf.write(u"\13\u00db\3\2\2\2\r\u00e5\3\2\2\2\17\u0106\3\2\2\2\21") - buf.write(u"\u0115\3\2\2\2\23\u0117\3\2\2\2\25\u0149\3\2\2\2\27\u014d") - buf.write(u"\3\2\2\2\31\u0150\3\2\2\2\33\u0154\3\2\2\2\35\u015f\3") - buf.write(u"\2\2\2\37\u0164\3\2\2\2!\u016c\3\2\2\2#\u0177\3\2\2\2") - buf.write(u"%\u0180\3\2\2\2\'\u0185\3\2\2\2)\u0188\3\2\2\2+\u018e") - buf.write(u"\3\2\2\2-\u0193\3\2\2\2/\u019b\3\2\2\2\61\u01a0\3\2\2") - buf.write(u"\2\63\u01a6\3\2\2\2\65\u01ad\3\2\2\2\67\u01b5\3\2\2\2") - buf.write(u"9\u01bb\3\2\2\2;\u01c2\3\2\2\2=\u01cc\3\2\2\2?\u01d2") - buf.write(u"\3\2\2\2A\u01d4\3\2\2\2C\u01d6\3\2\2\2E\u01d9\3\2\2\2") - buf.write(u"G\u01db\3\2\2\2I\u01de\3\2\2\2K\u01e0\3\2\2\2M\u01e2") - buf.write(u"\3\2\2\2O\u01e4\3\2\2\2Q\u01e6\3\2\2\2S\u01e8\3\2\2\2") - buf.write(u"U\u01ea\3\2\2\2W\u01ec\3\2\2\2Y\u01ee\3\2\2\2[\u01f0") - buf.write(u"\3\2\2\2]\u01f2\3\2\2\2_\u01f4\3\2\2\2a\u01f6\3\2\2\2") - buf.write(u"c\u01f8\3\2\2\2e\u01fa\3\2\2\2g\u01fc\3\2\2\2i\u01fe") - buf.write(u"\3\2\2\2k\u0200\3\2\2\2m\u0202\3\2\2\2o\u0204\3\2\2\2") - buf.write(u"q\u0206\3\2\2\2s\u0208\3\2\2\2u\u020a\3\2\2\2w\u020c") - buf.write(u"\3\2\2\2y\u020e\3\2\2\2{\u0210\3\2\2\2}\u0212\3\2\2\2") - buf.write(u"\177\u0214\3\2\2\2\u0081\u0216\3\2\2\2\u0083\u0218\3") - buf.write(u"\2\2\2\u0085\u021a\3\2\2\2\u0087\u021c\3\2\2\2\u0089") - buf.write(u"\u021e\3\2\2\2\u008b\u0220\3\2\2\2\u008d\u0222\3\2\2") - buf.write(u"\2\u008f\u0224\3\2\2\2\u0091\u0226\3\2\2\2\u0093\u0228") - buf.write(u"\3\2\2\2\u0095\u022a\3\2\2\2\u0097\u022c\3\2\2\2\u0099") - 
buf.write(u"\u022e\3\2\2\2\u009b\u0230\3\2\2\2\u009d\u0233\3\2\2") - buf.write(u"\2\u009f\u0236\3\2\2\2\u00a1\u023c\3\2\2\2\u00a3\u024a") - buf.write(u"\3\2\2\2\u00a5\u0255\3\2\2\2\u00a7\u00b0\7/\2\2\u00a8") - buf.write(u"\u00b1\7\62\2\2\u00a9\u00ad\t\2\2\2\u00aa\u00ac\t\3\2") - buf.write(u"\2\u00ab\u00aa\3\2\2\2\u00ac\u00af\3\2\2\2\u00ad\u00ab") - buf.write(u"\3\2\2\2\u00ad\u00ae\3\2\2\2\u00ae\u00b1\3\2\2\2\u00af") - buf.write(u"\u00ad\3\2\2\2\u00b0\u00a8\3\2\2\2\u00b0\u00a9\3\2\2") - buf.write(u"\2\u00b1\4\3\2\2\2\u00b2\u00b4\7-\2\2\u00b3\u00b2\3\2") - buf.write(u"\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00bd\3\2\2\2\u00b5\u00be") - buf.write(u"\7\62\2\2\u00b6\u00ba\t\2\2\2\u00b7\u00b9\t\3\2\2\u00b8") - buf.write(u"\u00b7\3\2\2\2\u00b9\u00bc\3\2\2\2\u00ba\u00b8\3\2\2") - buf.write(u"\2\u00ba\u00bb\3\2\2\2\u00bb\u00be\3\2\2\2\u00bc\u00ba") - buf.write(u"\3\2\2\2\u00bd\u00b5\3\2\2\2\u00bd\u00b6\3\2\2\2\u00be") - buf.write(u"\6\3\2\2\2\u00bf\u00c3\7/\2\2\u00c0\u00c2\t\3\2\2\u00c1") - buf.write(u"\u00c0\3\2\2\2\u00c2\u00c5\3\2\2\2\u00c3\u00c1\3\2\2") - buf.write(u"\2\u00c3\u00c4\3\2\2\2\u00c4\u00c6\3\2\2\2\u00c5\u00c3") - buf.write(u"\3\2\2\2\u00c6\u00c8\7\60\2\2\u00c7\u00c9\t\3\2\2\u00c8") - buf.write(u"\u00c7\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca\u00c8\3\2\2") - buf.write(u"\2\u00ca\u00cb\3\2\2\2\u00cb\b\3\2\2\2\u00cc\u00ce\7") - buf.write(u"-\2\2\u00cd\u00cc\3\2\2\2\u00cd\u00ce\3\2\2\2\u00ce\u00d2") - buf.write(u"\3\2\2\2\u00cf\u00d1\t\3\2\2\u00d0\u00cf\3\2\2\2\u00d1") - buf.write(u"\u00d4\3\2\2\2\u00d2\u00d0\3\2\2\2\u00d2\u00d3\3\2\2") - buf.write(u"\2\u00d3\u00d5\3\2\2\2\u00d4\u00d2\3\2\2\2\u00d5\u00d7") - buf.write(u"\7\60\2\2\u00d6\u00d8\t\3\2\2\u00d7\u00d6\3\2\2\2\u00d8") - buf.write(u"\u00d9\3\2\2\2\u00d9\u00d7\3\2\2\2\u00d9\u00da\3\2\2") - buf.write(u"\2\u00da\n\3\2\2\2\u00db\u00dc\7j\2\2\u00dc\u00e0\5I") - buf.write(u"%\2\u00dd\u00df\5\u009bN\2\u00de\u00dd\3\2\2\2\u00df") - buf.write(u"\u00e2\3\2\2\2\u00e0\u00de\3\2\2\2\u00e0\u00e1\3\2\2") - 
buf.write(u"\2\u00e1\u00e3\3\2\2\2\u00e2\u00e0\3\2\2\2\u00e3\u00e4") - buf.write(u"\5I%\2\u00e4\f\3\2\2\2\u00e5\u00e6\7d\2\2\u00e6\u00ee") - buf.write(u"\5I%\2\u00e7\u00e8\5\u009dO\2\u00e8\u00e9\5\u009dO\2") - buf.write(u"\u00e9\u00ea\5\u009dO\2\u00ea\u00eb\5\u009dO\2\u00eb") - buf.write(u"\u00ed\3\2\2\2\u00ec\u00e7\3\2\2\2\u00ed\u00f0\3\2\2") - buf.write(u"\2\u00ee\u00ec\3\2\2\2\u00ee\u00ef\3\2\2\2\u00ef\u0102") - buf.write(u"\3\2\2\2\u00f0\u00ee\3\2\2\2\u00f1\u00f2\5\u009dO\2\u00f2") - buf.write(u"\u00f3\5\u009dO\2\u00f3\u00f4\5\u009dO\2\u00f4\u00f5") - buf.write(u"\5\u009dO\2\u00f5\u0103\3\2\2\2\u00f6\u00f7\5\u009dO") - buf.write(u"\2\u00f7\u00f8\5\u009dO\2\u00f8\u00f9\5\u009dO\2\u00f9") - buf.write(u"\u00fa\3\2\2\2\u00fa\u00fb\7?\2\2\u00fb\u0103\3\2\2\2") - buf.write(u"\u00fc\u00fd\5\u009dO\2\u00fd\u00fe\5\u009dO\2\u00fe") - buf.write(u"\u00ff\3\2\2\2\u00ff\u0100\7?\2\2\u0100\u0101\7?\2\2") - buf.write(u"\u0101\u0103\3\2\2\2\u0102\u00f1\3\2\2\2\u0102\u00f6") - buf.write(u"\3\2\2\2\u0102\u00fc\3\2\2\2\u0103\u0104\3\2\2\2\u0104") - buf.write(u"\u0105\5I%\2\u0105\16\3\2\2\2\u0106\u010e\5I%\2\u0107") - buf.write(u"\u010d\n\4\2\2\u0108\u0109\7^\2\2\u0109\u010d\7)\2\2") - buf.write(u"\u010a\u010b\7^\2\2\u010b\u010d\7^\2\2\u010c\u0107\3") - buf.write(u"\2\2\2\u010c\u0108\3\2\2\2\u010c\u010a\3\2\2\2\u010d") - buf.write(u"\u0110\3\2\2\2\u010e\u010c\3\2\2\2\u010e\u010f\3\2\2") - buf.write(u"\2\u010f\u0111\3\2\2\2\u0110\u010e\3\2\2\2\u0111\u0112") - buf.write(u"\5I%\2\u0112\20\3\2\2\2\u0113\u0116\5/\30\2\u0114\u0116") - buf.write(u"\5\61\31\2\u0115\u0113\3\2\2\2\u0115\u0114\3\2\2\2\u0116") - buf.write(u"\22\3\2\2\2\u0117\u0118\7v\2\2\u0118\u0119\5I%\2\u0119") - buf.write(u"\u011a\t\3\2\2\u011a\u011b\t\3\2\2\u011b\u011c\t\3\2") - buf.write(u"\2\u011c\u011d\t\3\2\2\u011d\u0122\5[.\2\u011e\u011f") - buf.write(u"\7\62\2\2\u011f\u0123\t\2\2\2\u0120\u0121\7\63\2\2\u0121") - buf.write(u"\u0123\t\5\2\2\u0122\u011e\3\2\2\2\u0122\u0120\3\2\2") - 
buf.write(u"\2\u0123\u0124\3\2\2\2\u0124\u012b\5[.\2\u0125\u0126") - buf.write(u"\7\62\2\2\u0126\u012c\t\2\2\2\u0127\u0128\t\6\2\2\u0128") - buf.write(u"\u012c\t\3\2\2\u0129\u012a\7\65\2\2\u012a\u012c\t\7\2") - buf.write(u"\2\u012b\u0125\3\2\2\2\u012b\u0127\3\2\2\2\u012b\u0129") - buf.write(u"\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u0132\7V\2\2\u012e") - buf.write(u"\u012f\t\7\2\2\u012f\u0133\t\3\2\2\u0130\u0131\7\64\2") - buf.write(u"\2\u0131\u0133\t\b\2\2\u0132\u012e\3\2\2\2\u0132\u0130") - buf.write(u"\3\2\2\2\u0133\u0134\3\2\2\2\u0134\u0135\5K&\2\u0135") - buf.write(u"\u0136\t\t\2\2\u0136\u0137\t\3\2\2\u0137\u013c\5K&\2") - buf.write(u"\u0138\u0139\t\t\2\2\u0139\u013d\t\3\2\2\u013a\u013b") - buf.write(u"\78\2\2\u013b\u013d\7\62\2\2\u013c\u0138\3\2\2\2\u013c") - buf.write(u"\u013a\3\2\2\2\u013d\u0144\3\2\2\2\u013e\u0140\5M\'\2") - buf.write(u"\u013f\u0141\t\3\2\2\u0140\u013f\3\2\2\2\u0141\u0142") - buf.write(u"\3\2\2\2\u0142\u0140\3\2\2\2\u0142\u0143\3\2\2\2\u0143") - buf.write(u"\u0145\3\2\2\2\u0144\u013e\3\2\2\2\u0144\u0145\3\2\2") - buf.write(u"\2\u0145\u0146\3\2\2\2\u0146\u0147\7\\\2\2\u0147\u0148") - buf.write(u"\5I%\2\u0148\24\3\2\2\2\u0149\u014a\5e\63\2\u014a\u014b") - buf.write(u"\5\177@\2\u014b\u014c\5k\66\2\u014c\26\3\2\2\2\u014d") - buf.write(u"\u014e\5\u0081A\2\u014e\u014f\5\u0087D\2\u014f\30\3\2") - buf.write(u"\2\2\u0150\u0151\5\177@\2\u0151\u0152\5\u0081A\2\u0152") - buf.write(u"\u0153\5\u008bF\2\u0153\32\3\2\2\2\u0154\u0155\5o8\2") - buf.write(u"\u0155\u0156\5\u0081A\2\u0156\u0157\5{>\2\u0157\u0158") - buf.write(u"\5{>\2\u0158\u0159\5\u0081A\2\u0159\u015a\5\u0091I\2") - buf.write(u"\u015a\u015b\5m\67\2\u015b\u015c\5k\66\2\u015c\u015d") - buf.write(u"\5g\64\2\u015d\u015e\5\u0095K\2\u015e\34\3\2\2\2\u015f") - buf.write(u"\u0160\5{>\2\u0160\u0161\5u;\2\u0161\u0162\5y=\2\u0162") - buf.write(u"\u0163\5m\67\2\u0163\36\3\2\2\2\u0164\u0165\5}?\2\u0165") - buf.write(u"\u0166\5e\63\2\u0166\u0167\5\u008bF\2\u0167\u0168\5i") - 
buf.write(u"\65\2\u0168\u0169\5s:\2\u0169\u016a\5m\67\2\u016a\u016b") - buf.write(u"\5\u0089E\2\u016b \3\2\2\2\u016c\u016d\5u;\2\u016d\u016e") - buf.write(u"\5\u0089E\2\u016e\u016f\5\u0089E\2\u016f\u0170\5\u008d") - buf.write(u"G\2\u0170\u0171\5\u0083B\2\u0171\u0172\5m\67\2\u0172") - buf.write(u"\u0173\5\u0087D\2\u0173\u0174\5\u0089E\2\u0174\u0175") - buf.write(u"\5m\67\2\u0175\u0176\5\u008bF\2\u0176\"\3\2\2\2\u0177") - buf.write(u"\u0178\5u;\2\u0178\u0179\5\u0089E\2\u0179\u017a\5\u0089") - buf.write(u"E\2\u017a\u017b\5\u008dG\2\u017b\u017c\5g\64\2\u017c") - buf.write(u"\u017d\5\u0089E\2\u017d\u017e\5m\67\2\u017e\u017f\5\u008b") - buf.write(u"F\2\u017f$\3\2\2\2\u0180\u0181\5{>\2\u0181\u0182\5e\63") - buf.write(u"\2\u0182\u0183\5\u0089E\2\u0183\u0184\5\u008bF\2\u0184") - buf.write(u"&\3\2\2\2\u0185\u0186\5u;\2\u0186\u0187\5\177@\2\u0187") - buf.write(u"(\3\2\2\2\u0188\u0189\5\u0089E\2\u0189\u018a\5\u008b") - buf.write(u"F\2\u018a\u018b\5e\63\2\u018b\u018c\5\u0087D\2\u018c") - buf.write(u"\u018d\5\u008bF\2\u018d*\3\2\2\2\u018e\u018f\5\u0089") - buf.write(u"E\2\u018f\u0190\5\u008bF\2\u0190\u0191\5\u0081A\2\u0191") - buf.write(u"\u0192\5\u0083B\2\u0192,\3\2\2\2\u0193\u0194\5\u0089") - buf.write(u"E\2\u0194\u0195\5m\67\2\u0195\u0196\5i\65\2\u0196\u0197") - buf.write(u"\5\u0081A\2\u0197\u0198\5\177@\2\u0198\u0199\5k\66\2") - buf.write(u"\u0199\u019a\5\u0089E\2\u019a.\3\2\2\2\u019b\u019c\5") - buf.write(u"\u008bF\2\u019c\u019d\5\u0087D\2\u019d\u019e\5\u008d") - buf.write(u"G\2\u019e\u019f\5m\67\2\u019f\60\3\2\2\2\u01a0\u01a1") - buf.write(u"\5o8\2\u01a1\u01a2\5e\63\2\u01a2\u01a3\5{>\2\u01a3\u01a4") - buf.write(u"\5\u0089E\2\u01a4\u01a5\5m\67\2\u01a5\62\3\2\2\2\u01a6") - buf.write(u"\u01a7\5\u0091I\2\u01a7\u01a8\5u;\2\u01a8\u01a9\5\u008b") - buf.write(u"F\2\u01a9\u01aa\5s:\2\u01aa\u01ab\5u;\2\u01ab\u01ac\5") - buf.write(u"\177@\2\u01ac\64\3\2\2\2\u01ad\u01ae\5\u0087D\2\u01ae") - buf.write(u"\u01af\5m\67\2\u01af\u01b0\5\u0083B\2\u01b0\u01b1\5m") - 
buf.write(u"\67\2\u01b1\u01b2\5e\63\2\u01b2\u01b3\5\u008bF\2\u01b3") - buf.write(u"\u01b4\5\u0089E\2\u01b4\66\3\2\2\2\u01b5\u01b6\5\u008b") - buf.write(u"F\2\u01b6\u01b7\5u;\2\u01b7\u01b8\5}?\2\u01b8\u01b9\5") - buf.write(u"m\67\2\u01b9\u01ba\5\u0089E\2\u01ba8\3\2\2\2\u01bb\u01bf") - buf.write(u"\t\n\2\2\u01bc\u01be\t\13\2\2\u01bd\u01bc\3\2\2\2\u01be") - buf.write(u"\u01c1\3\2\2\2\u01bf\u01bd\3\2\2\2\u01bf\u01c0\3\2\2") - buf.write(u"\2\u01c0:\3\2\2\2\u01c1\u01bf\3\2\2\2\u01c2\u01c6\t\n") - buf.write(u"\2\2\u01c3\u01c5\t\f\2\2\u01c4\u01c3\3\2\2\2\u01c5\u01c8") - buf.write(u"\3\2\2\2\u01c6\u01c4\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7") - buf.write(u"<\3\2\2\2\u01c8\u01c6\3\2\2\2\u01c9\u01cd\7?\2\2\u01ca") - buf.write(u"\u01cb\7?\2\2\u01cb\u01cd\7?\2\2\u01cc\u01c9\3\2\2\2") - buf.write(u"\u01cc\u01ca\3\2\2\2\u01cd>\3\2\2\2\u01ce\u01cf\7#\2") - buf.write(u"\2\u01cf\u01d3\7?\2\2\u01d0\u01d1\7>\2\2\u01d1\u01d3") - buf.write(u"\7@\2\2\u01d2\u01ce\3\2\2\2\u01d2\u01d0\3\2\2\2\u01d3") - buf.write(u"@\3\2\2\2\u01d4\u01d5\7>\2\2\u01d5B\3\2\2\2\u01d6\u01d7") - buf.write(u"\7>\2\2\u01d7\u01d8\7?\2\2\u01d8D\3\2\2\2\u01d9\u01da") - buf.write(u"\7@\2\2\u01daF\3\2\2\2\u01db\u01dc\7@\2\2\u01dc\u01dd") - buf.write(u"\7?\2\2\u01ddH\3\2\2\2\u01de\u01df\7)\2\2\u01dfJ\3\2") - buf.write(u"\2\2\u01e0\u01e1\7<\2\2\u01e1L\3\2\2\2\u01e2\u01e3\7") - buf.write(u"\60\2\2\u01e3N\3\2\2\2\u01e4\u01e5\7.\2\2\u01e5P\3\2") - buf.write(u"\2\2\u01e6\u01e7\7+\2\2\u01e7R\3\2\2\2\u01e8\u01e9\7") - buf.write(u"*\2\2\u01e9T\3\2\2\2\u01ea\u01eb\7_\2\2\u01ebV\3\2\2") - buf.write(u"\2\u01ec\u01ed\7]\2\2\u01edX\3\2\2\2\u01ee\u01ef\7-\2") - buf.write(u"\2\u01efZ\3\2\2\2\u01f0\u01f1\5]/\2\u01f1\\\3\2\2\2\u01f2") - buf.write(u"\u01f3\7/\2\2\u01f3^\3\2\2\2\u01f4\u01f5\7`\2\2\u01f5") - buf.write(u"`\3\2\2\2\u01f6\u01f7\7\61\2\2\u01f7b\3\2\2\2\u01f8\u01f9") - buf.write(u"\7,\2\2\u01f9d\3\2\2\2\u01fa\u01fb\t\r\2\2\u01fbf\3\2") - buf.write(u"\2\2\u01fc\u01fd\t\16\2\2\u01fdh\3\2\2\2\u01fe\u01ff") - 
buf.write(u"\t\17\2\2\u01ffj\3\2\2\2\u0200\u0201\t\20\2\2\u0201l") - buf.write(u"\3\2\2\2\u0202\u0203\t\21\2\2\u0203n\3\2\2\2\u0204\u0205") - buf.write(u"\t\22\2\2\u0205p\3\2\2\2\u0206\u0207\t\23\2\2\u0207r") - buf.write(u"\3\2\2\2\u0208\u0209\t\24\2\2\u0209t\3\2\2\2\u020a\u020b") - buf.write(u"\t\25\2\2\u020bv\3\2\2\2\u020c\u020d\t\26\2\2\u020dx") - buf.write(u"\3\2\2\2\u020e\u020f\t\27\2\2\u020fz\3\2\2\2\u0210\u0211") - buf.write(u"\t\30\2\2\u0211|\3\2\2\2\u0212\u0213\t\31\2\2\u0213~") - buf.write(u"\3\2\2\2\u0214\u0215\t\32\2\2\u0215\u0080\3\2\2\2\u0216") - buf.write(u"\u0217\t\33\2\2\u0217\u0082\3\2\2\2\u0218\u0219\t\34") - buf.write(u"\2\2\u0219\u0084\3\2\2\2\u021a\u021b\t\35\2\2\u021b\u0086") - buf.write(u"\3\2\2\2\u021c\u021d\t\36\2\2\u021d\u0088\3\2\2\2\u021e") - buf.write(u"\u021f\t\37\2\2\u021f\u008a\3\2\2\2\u0220\u0221\t \2") - buf.write(u"\2\u0221\u008c\3\2\2\2\u0222\u0223\t!\2\2\u0223\u008e") - buf.write(u"\3\2\2\2\u0224\u0225\t\"\2\2\u0225\u0090\3\2\2\2\u0226") - buf.write(u"\u0227\t#\2\2\u0227\u0092\3\2\2\2\u0228\u0229\t$\2\2") - buf.write(u"\u0229\u0094\3\2\2\2\u022a\u022b\t%\2\2\u022b\u0096\3") - buf.write(u"\2\2\2\u022c\u022d\t&\2\2\u022d\u0098\3\2\2\2\u022e\u022f") - buf.write(u"\t\'\2\2\u022f\u009a\3\2\2\2\u0230\u0231\5\u0099M\2\u0231") - buf.write(u"\u0232\5\u0099M\2\u0232\u009c\3\2\2\2\u0233\u0234\t(") - buf.write(u"\2\2\u0234\u009e\3\2\2\2\u0235\u0237\t)\2\2\u0236\u0235") - buf.write(u"\3\2\2\2\u0237\u0238\3\2\2\2\u0238\u0236\3\2\2\2\u0238") - buf.write(u"\u0239\3\2\2\2\u0239\u023a\3\2\2\2\u023a\u023b\bP\2\2") - buf.write(u"\u023b\u00a0\3\2\2\2\u023c\u023d\7\61\2\2\u023d\u023e") - buf.write(u"\7,\2\2\u023e\u0242\3\2\2\2\u023f\u0241\13\2\2\2\u0240") - buf.write(u"\u023f\3\2\2\2\u0241\u0244\3\2\2\2\u0242\u0243\3\2\2") - buf.write(u"\2\u0242\u0240\3\2\2\2\u0243\u0245\3\2\2\2\u0244\u0242") - buf.write(u"\3\2\2\2\u0245\u0246\7,\2\2\u0246\u0247\7\61\2\2\u0247") - buf.write(u"\u0248\3\2\2\2\u0248\u0249\bQ\2\2\u0249\u00a2\3\2\2\2") - 
buf.write(u"\u024a\u024b\7\61\2\2\u024b\u024c\7\61\2\2\u024c\u0250") - buf.write(u"\3\2\2\2\u024d\u024f\n*\2\2\u024e\u024d\3\2\2\2\u024f") - buf.write(u"\u0252\3\2\2\2\u0250\u024e\3\2\2\2\u0250\u0251\3\2\2") - buf.write(u"\2\u0251\u0253\3\2\2\2\u0252\u0250\3\2\2\2\u0253\u0254") - buf.write(u"\bR\2\2\u0254\u00a4\3\2\2\2\u0255\u0256\13\2\2\2\u0256") - buf.write(u"\u00a6\3\2\2\2 \2\u00ad\u00b0\u00b3\u00ba\u00bd\u00c3") - buf.write(u"\u00ca\u00cd\u00d2\u00d9\u00e0\u00ee\u0102\u010c\u010e") - buf.write(u"\u0115\u0122\u012b\u0132\u013c\u0142\u0144\u01bf\u01c6") - buf.write(u"\u01cc\u01d2\u0238\u0242\u0250\3\b\2\2") + buf.write(u"8\t8\49\t9\3\2\3\2\3\2\3\2\7\2x\n\2\f\2\16\2{\13\2\5") + buf.write(u"\2}\n\2\3\3\5\3\u0080\n\3\3\3\3\3\3\3\7\3\u0085\n\3\f") + buf.write(u"\3\16\3\u0088\13\3\5\3\u008a\n\3\3\4\3\4\7\4\u008e\n") + buf.write(u"\4\f\4\16\4\u0091\13\4\3\4\3\4\6\4\u0095\n\4\r\4\16\4") + buf.write(u"\u0096\3\5\5\5\u009a\n\5\3\5\7\5\u009d\n\5\f\5\16\5\u00a0") + buf.write(u"\13\5\3\5\3\5\6\5\u00a4\n\5\r\5\16\5\u00a5\3\6\3\6\3") + buf.write(u"\6\7\6\u00ab\n\6\f\6\16\6\u00ae\13\6\3\6\3\6\3\7\3\7") + buf.write(u"\3\7\3\7\3\7\3\7\3\7\7\7\u00b9\n\7\f\7\16\7\u00bc\13") + buf.write(u"\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3") + buf.write(u"\7\3\7\3\7\3\7\3\7\5\7\u00cf\n\7\3\7\3\7\3\b\3\b\3\b") + buf.write(u"\3\b\3\b\3\b\7\b\u00d9\n\b\f\b\16\b\u00dc\13\b\3\b\3") + buf.write(u"\b\3\t\3\t\5\t\u00e2\n\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n") + buf.write(u"\3\n\3\n\3\n\3\n\5\n\u00ef\n\n\3\n\3\n\3\n\3\n\3\n\3") + buf.write(u"\n\3\n\5\n\u00f8\n\n\3\n\3\n\3\n\3\n\3\n\5\n\u00ff\n") + buf.write(u"\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\5\n\u0109\n\n\3\n") + buf.write(u"\3\n\6\n\u010d\n\n\r\n\16\n\u010e\5\n\u0111\n\n\3\n\3") + buf.write(u"\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\r\3\r\3\r\3") + buf.write(u"\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16") + buf.write(u"\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3") + 
buf.write(u"\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21") + buf.write(u"\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3") + buf.write(u"\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24") + buf.write(u"\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3") + buf.write(u"\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30") + buf.write(u"\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3") + buf.write(u"\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33") + buf.write(u"\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3") + buf.write(u"\35\7\35\u018a\n\35\f\35\16\35\u018d\13\35\3\36\3\36") + buf.write(u"\7\36\u0191\n\36\f\36\16\36\u0194\13\36\3\37\3\37\3\37") + buf.write(u"\5\37\u0199\n\37\3 \3 \3 \3 \5 \u019f\n \3!\3!\3\"\3") + buf.write(u"\"\3\"\3#\3#\3$\3$\3$\3%\3%\3&\3&\3\'\3\'\3(\3(\3)\3") + buf.write(u")\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3/\3/\3\60\3\60\3\61") + buf.write(u"\3\61\3\62\3\62\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3") + buf.write(u"\66\6\66\u01cf\n\66\r\66\16\66\u01d0\3\66\3\66\3\67\3") + buf.write(u"\67\3\67\3\67\7\67\u01d9\n\67\f\67\16\67\u01dc\13\67") + buf.write(u"\3\67\3\67\3\67\3\67\3\67\38\38\38\38\78\u01e7\n8\f8") + buf.write(u"\168\u01ea\138\38\38\39\39\3\u01da\2:\3\3\5\4\7\5\t\6") + buf.write(u"\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20") + buf.write(u"\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65") + buf.write(u"\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]") + buf.write(u"\60_\61a\62c\63e\2g\2i\2k\64m\65o\66q\67\3\2\21\3\2\63") + buf.write(u";\3\2\62;\4\2))^^\3\2\62\64\3\2\63\64\3\2\62\63\3\2\62") + buf.write(u"\65\3\2\62\67\5\2C\\aac|\6\2\62;C\\aac|\7\2//\62;C\\") + buf.write(u"aac|\5\2\62;CHch\6\2--\61;C\\c|\f\2\13\17\"\"\u0087\u0087") + buf.write(u"\u00a2\u00a2\u1682\u1682\u2002\u200c\u202a\u202b\u2031") + buf.write(u"\u2031\u2061\u2061\u3002\u3002\4\2\f\f\17\17\2\u020b") + buf.write(u"\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13") + 
buf.write(u"\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3") + buf.write(u"\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3") + buf.write(u"\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2") + buf.write(u"\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3") + buf.write(u"\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2") + buf.write(u"\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2") + buf.write(u"?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2") + buf.write(u"\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2") + buf.write(u"\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2") + buf.write(u"\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2k\3") + buf.write(u"\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\3s\3\2\2\2\5") + buf.write(u"\177\3\2\2\2\7\u008b\3\2\2\2\t\u0099\3\2\2\2\13\u00a7") + buf.write(u"\3\2\2\2\r\u00b1\3\2\2\2\17\u00d2\3\2\2\2\21\u00e1\3") + buf.write(u"\2\2\2\23\u00e3\3\2\2\2\25\u0115\3\2\2\2\27\u0119\3\2") + buf.write(u"\2\2\31\u011c\3\2\2\2\33\u0120\3\2\2\2\35\u012b\3\2\2") + buf.write(u"\2\37\u0130\3\2\2\2!\u0138\3\2\2\2#\u0143\3\2\2\2%\u014c") + buf.write(u"\3\2\2\2\'\u0151\3\2\2\2)\u0154\3\2\2\2+\u015a\3\2\2") + buf.write(u"\2-\u015f\3\2\2\2/\u0167\3\2\2\2\61\u016c\3\2\2\2\63") + buf.write(u"\u0172\3\2\2\2\65\u0179\3\2\2\2\67\u0181\3\2\2\29\u0187") + buf.write(u"\3\2\2\2;\u018e\3\2\2\2=\u0198\3\2\2\2?\u019e\3\2\2\2") + buf.write(u"A\u01a0\3\2\2\2C\u01a2\3\2\2\2E\u01a5\3\2\2\2G\u01a7") + buf.write(u"\3\2\2\2I\u01aa\3\2\2\2K\u01ac\3\2\2\2M\u01ae\3\2\2\2") + buf.write(u"O\u01b0\3\2\2\2Q\u01b2\3\2\2\2S\u01b4\3\2\2\2U\u01b6") + buf.write(u"\3\2\2\2W\u01b8\3\2\2\2Y\u01ba\3\2\2\2[\u01bc\3\2\2\2") + buf.write(u"]\u01be\3\2\2\2_\u01c0\3\2\2\2a\u01c2\3\2\2\2c\u01c4") + buf.write(u"\3\2\2\2e\u01c6\3\2\2\2g\u01c8\3\2\2\2i\u01cb\3\2\2\2") + buf.write(u"k\u01ce\3\2\2\2m\u01d4\3\2\2\2o\u01e2\3\2\2\2q\u01ed") + buf.write(u"\3\2\2\2s|\7/\2\2t}\7\62\2\2uy\t\2\2\2vx\t\3\2\2wv\3") + 
buf.write(u"\2\2\2x{\3\2\2\2yw\3\2\2\2yz\3\2\2\2z}\3\2\2\2{y\3\2") + buf.write(u"\2\2|t\3\2\2\2|u\3\2\2\2}\4\3\2\2\2~\u0080\7-\2\2\177") + buf.write(u"~\3\2\2\2\177\u0080\3\2\2\2\u0080\u0089\3\2\2\2\u0081") + buf.write(u"\u008a\7\62\2\2\u0082\u0086\t\2\2\2\u0083\u0085\t\3\2") + buf.write(u"\2\u0084\u0083\3\2\2\2\u0085\u0088\3\2\2\2\u0086\u0084") + buf.write(u"\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u008a\3\2\2\2\u0088") + buf.write(u"\u0086\3\2\2\2\u0089\u0081\3\2\2\2\u0089\u0082\3\2\2") + buf.write(u"\2\u008a\6\3\2\2\2\u008b\u008f\7/\2\2\u008c\u008e\t\3") + buf.write(u"\2\2\u008d\u008c\3\2\2\2\u008e\u0091\3\2\2\2\u008f\u008d") + buf.write(u"\3\2\2\2\u008f\u0090\3\2\2\2\u0090\u0092\3\2\2\2\u0091") + buf.write(u"\u008f\3\2\2\2\u0092\u0094\7\60\2\2\u0093\u0095\t\3\2") + buf.write(u"\2\u0094\u0093\3\2\2\2\u0095\u0096\3\2\2\2\u0096\u0094") + buf.write(u"\3\2\2\2\u0096\u0097\3\2\2\2\u0097\b\3\2\2\2\u0098\u009a") + buf.write(u"\7-\2\2\u0099\u0098\3\2\2\2\u0099\u009a\3\2\2\2\u009a") + buf.write(u"\u009e\3\2\2\2\u009b\u009d\t\3\2\2\u009c\u009b\3\2\2") + buf.write(u"\2\u009d\u00a0\3\2\2\2\u009e\u009c\3\2\2\2\u009e\u009f") + buf.write(u"\3\2\2\2\u009f\u00a1\3\2\2\2\u00a0\u009e\3\2\2\2\u00a1") + buf.write(u"\u00a3\7\60\2\2\u00a2\u00a4\t\3\2\2\u00a3\u00a2\3\2\2") + buf.write(u"\2\u00a4\u00a5\3\2\2\2\u00a5\u00a3\3\2\2\2\u00a5\u00a6") + buf.write(u"\3\2\2\2\u00a6\n\3\2\2\2\u00a7\u00a8\7j\2\2\u00a8\u00ac") + buf.write(u"\5I%\2\u00a9\u00ab\5g\64\2\u00aa\u00a9\3\2\2\2\u00ab") + buf.write(u"\u00ae\3\2\2\2\u00ac\u00aa\3\2\2\2\u00ac\u00ad\3\2\2") + buf.write(u"\2\u00ad\u00af\3\2\2\2\u00ae\u00ac\3\2\2\2\u00af\u00b0") + buf.write(u"\5I%\2\u00b0\f\3\2\2\2\u00b1\u00b2\7d\2\2\u00b2\u00ba") + buf.write(u"\5I%\2\u00b3\u00b4\5i\65\2\u00b4\u00b5\5i\65\2\u00b5") + buf.write(u"\u00b6\5i\65\2\u00b6\u00b7\5i\65\2\u00b7\u00b9\3\2\2") + buf.write(u"\2\u00b8\u00b3\3\2\2\2\u00b9\u00bc\3\2\2\2\u00ba\u00b8") + buf.write(u"\3\2\2\2\u00ba\u00bb\3\2\2\2\u00bb\u00ce\3\2\2\2\u00bc") + 
buf.write(u"\u00ba\3\2\2\2\u00bd\u00be\5i\65\2\u00be\u00bf\5i\65") + buf.write(u"\2\u00bf\u00c0\5i\65\2\u00c0\u00c1\5i\65\2\u00c1\u00cf") + buf.write(u"\3\2\2\2\u00c2\u00c3\5i\65\2\u00c3\u00c4\5i\65\2\u00c4") + buf.write(u"\u00c5\5i\65\2\u00c5\u00c6\3\2\2\2\u00c6\u00c7\7?\2\2") + buf.write(u"\u00c7\u00cf\3\2\2\2\u00c8\u00c9\5i\65\2\u00c9\u00ca") + buf.write(u"\5i\65\2\u00ca\u00cb\3\2\2\2\u00cb\u00cc\7?\2\2\u00cc") + buf.write(u"\u00cd\7?\2\2\u00cd\u00cf\3\2\2\2\u00ce\u00bd\3\2\2\2") + buf.write(u"\u00ce\u00c2\3\2\2\2\u00ce\u00c8\3\2\2\2\u00cf\u00d0") + buf.write(u"\3\2\2\2\u00d0\u00d1\5I%\2\u00d1\16\3\2\2\2\u00d2\u00da") + buf.write(u"\5I%\2\u00d3\u00d9\n\4\2\2\u00d4\u00d5\7^\2\2\u00d5\u00d9") + buf.write(u"\7)\2\2\u00d6\u00d7\7^\2\2\u00d7\u00d9\7^\2\2\u00d8\u00d3") + buf.write(u"\3\2\2\2\u00d8\u00d4\3\2\2\2\u00d8\u00d6\3\2\2\2\u00d9") + buf.write(u"\u00dc\3\2\2\2\u00da\u00d8\3\2\2\2\u00da\u00db\3\2\2") + buf.write(u"\2\u00db\u00dd\3\2\2\2\u00dc\u00da\3\2\2\2\u00dd\u00de") + buf.write(u"\5I%\2\u00de\20\3\2\2\2\u00df\u00e2\5/\30\2\u00e0\u00e2") + buf.write(u"\5\61\31\2\u00e1\u00df\3\2\2\2\u00e1\u00e0\3\2\2\2\u00e2") + buf.write(u"\22\3\2\2\2\u00e3\u00e4\7v\2\2\u00e4\u00e5\5I%\2\u00e5") + buf.write(u"\u00e6\t\3\2\2\u00e6\u00e7\t\3\2\2\u00e7\u00e8\t\3\2") + buf.write(u"\2\u00e8\u00e9\t\3\2\2\u00e9\u00ee\5[.\2\u00ea\u00eb") + buf.write(u"\7\62\2\2\u00eb\u00ef\t\2\2\2\u00ec\u00ed\7\63\2\2\u00ed") + buf.write(u"\u00ef\t\5\2\2\u00ee\u00ea\3\2\2\2\u00ee\u00ec\3\2\2") + buf.write(u"\2\u00ef\u00f0\3\2\2\2\u00f0\u00f7\5[.\2\u00f1\u00f2") + buf.write(u"\7\62\2\2\u00f2\u00f8\t\2\2\2\u00f3\u00f4\t\6\2\2\u00f4") + buf.write(u"\u00f8\t\3\2\2\u00f5\u00f6\7\65\2\2\u00f6\u00f8\t\7\2") + buf.write(u"\2\u00f7\u00f1\3\2\2\2\u00f7\u00f3\3\2\2\2\u00f7\u00f5") + buf.write(u"\3\2\2\2\u00f8\u00f9\3\2\2\2\u00f9\u00fe\7V\2\2\u00fa") + buf.write(u"\u00fb\t\7\2\2\u00fb\u00ff\t\3\2\2\u00fc\u00fd\7\64\2") + buf.write(u"\2\u00fd\u00ff\t\b\2\2\u00fe\u00fa\3\2\2\2\u00fe\u00fc") + 
buf.write(u"\3\2\2\2\u00ff\u0100\3\2\2\2\u0100\u0101\5K&\2\u0101") + buf.write(u"\u0102\t\t\2\2\u0102\u0103\t\3\2\2\u0103\u0108\5K&\2") + buf.write(u"\u0104\u0105\t\t\2\2\u0105\u0109\t\3\2\2\u0106\u0107") + buf.write(u"\78\2\2\u0107\u0109\7\62\2\2\u0108\u0104\3\2\2\2\u0108") + buf.write(u"\u0106\3\2\2\2\u0109\u0110\3\2\2\2\u010a\u010c\5M\'\2") + buf.write(u"\u010b\u010d\t\3\2\2\u010c\u010b\3\2\2\2\u010d\u010e") + buf.write(u"\3\2\2\2\u010e\u010c\3\2\2\2\u010e\u010f\3\2\2\2\u010f") + buf.write(u"\u0111\3\2\2\2\u0110\u010a\3\2\2\2\u0110\u0111\3\2\2") + buf.write(u"\2\u0111\u0112\3\2\2\2\u0112\u0113\7\\\2\2\u0113\u0114") + buf.write(u"\5I%\2\u0114\24\3\2\2\2\u0115\u0116\7C\2\2\u0116\u0117") + buf.write(u"\7P\2\2\u0117\u0118\7F\2\2\u0118\26\3\2\2\2\u0119\u011a") + buf.write(u"\7Q\2\2\u011a\u011b\7T\2\2\u011b\30\3\2\2\2\u011c\u011d") + buf.write(u"\7P\2\2\u011d\u011e\7Q\2\2\u011e\u011f\7V\2\2\u011f\32") + buf.write(u"\3\2\2\2\u0120\u0121\7H\2\2\u0121\u0122\7Q\2\2\u0122") + buf.write(u"\u0123\7N\2\2\u0123\u0124\7N\2\2\u0124\u0125\7Q\2\2\u0125") + buf.write(u"\u0126\7Y\2\2\u0126\u0127\7G\2\2\u0127\u0128\7F\2\2\u0128") + buf.write(u"\u0129\7D\2\2\u0129\u012a\7[\2\2\u012a\34\3\2\2\2\u012b") + buf.write(u"\u012c\7N\2\2\u012c\u012d\7K\2\2\u012d\u012e\7M\2\2\u012e") + buf.write(u"\u012f\7G\2\2\u012f\36\3\2\2\2\u0130\u0131\7O\2\2\u0131") + buf.write(u"\u0132\7C\2\2\u0132\u0133\7V\2\2\u0133\u0134\7E\2\2\u0134") + buf.write(u"\u0135\7J\2\2\u0135\u0136\7G\2\2\u0136\u0137\7U\2\2\u0137") + buf.write(u" \3\2\2\2\u0138\u0139\7K\2\2\u0139\u013a\7U\2\2\u013a") + buf.write(u"\u013b\7U\2\2\u013b\u013c\7W\2\2\u013c\u013d\7R\2\2\u013d") + buf.write(u"\u013e\7G\2\2\u013e\u013f\7T\2\2\u013f\u0140\7U\2\2\u0140") + buf.write(u"\u0141\7G\2\2\u0141\u0142\7V\2\2\u0142\"\3\2\2\2\u0143") + buf.write(u"\u0144\7K\2\2\u0144\u0145\7U\2\2\u0145\u0146\7U\2\2\u0146") + buf.write(u"\u0147\7W\2\2\u0147\u0148\7D\2\2\u0148\u0149\7U\2\2\u0149") + 
buf.write(u"\u014a\7G\2\2\u014a\u014b\7V\2\2\u014b$\3\2\2\2\u014c") + buf.write(u"\u014d\7N\2\2\u014d\u014e\7C\2\2\u014e\u014f\7U\2\2\u014f") + buf.write(u"\u0150\7V\2\2\u0150&\3\2\2\2\u0151\u0152\7K\2\2\u0152") + buf.write(u"\u0153\7P\2\2\u0153(\3\2\2\2\u0154\u0155\7U\2\2\u0155") + buf.write(u"\u0156\7V\2\2\u0156\u0157\7C\2\2\u0157\u0158\7T\2\2\u0158") + buf.write(u"\u0159\7V\2\2\u0159*\3\2\2\2\u015a\u015b\7U\2\2\u015b") + buf.write(u"\u015c\7V\2\2\u015c\u015d\7Q\2\2\u015d\u015e\7R\2\2\u015e") + buf.write(u",\3\2\2\2\u015f\u0160\7U\2\2\u0160\u0161\7G\2\2\u0161") + buf.write(u"\u0162\7E\2\2\u0162\u0163\7Q\2\2\u0163\u0164\7P\2\2\u0164") + buf.write(u"\u0165\7F\2\2\u0165\u0166\7U\2\2\u0166.\3\2\2\2\u0167") + buf.write(u"\u0168\7v\2\2\u0168\u0169\7t\2\2\u0169\u016a\7w\2\2\u016a") + buf.write(u"\u016b\7g\2\2\u016b\60\3\2\2\2\u016c\u016d\7h\2\2\u016d") + buf.write(u"\u016e\7c\2\2\u016e\u016f\7n\2\2\u016f\u0170\7u\2\2\u0170") + buf.write(u"\u0171\7g\2\2\u0171\62\3\2\2\2\u0172\u0173\7Y\2\2\u0173") + buf.write(u"\u0174\7K\2\2\u0174\u0175\7V\2\2\u0175\u0176\7J\2\2\u0176") + buf.write(u"\u0177\7K\2\2\u0177\u0178\7P\2\2\u0178\64\3\2\2\2\u0179") + buf.write(u"\u017a\7T\2\2\u017a\u017b\7G\2\2\u017b\u017c\7R\2\2\u017c") + buf.write(u"\u017d\7G\2\2\u017d\u017e\7C\2\2\u017e\u017f\7V\2\2\u017f") + buf.write(u"\u0180\7U\2\2\u0180\66\3\2\2\2\u0181\u0182\7V\2\2\u0182") + buf.write(u"\u0183\7K\2\2\u0183\u0184\7O\2\2\u0184\u0185\7G\2\2\u0185") + buf.write(u"\u0186\7U\2\2\u01868\3\2\2\2\u0187\u018b\t\n\2\2\u0188") + buf.write(u"\u018a\t\13\2\2\u0189\u0188\3\2\2\2\u018a\u018d\3\2\2") + buf.write(u"\2\u018b\u0189\3\2\2\2\u018b\u018c\3\2\2\2\u018c:\3\2") + buf.write(u"\2\2\u018d\u018b\3\2\2\2\u018e\u0192\t\n\2\2\u018f\u0191") + buf.write(u"\t\f\2\2\u0190\u018f\3\2\2\2\u0191\u0194\3\2\2\2\u0192") + buf.write(u"\u0190\3\2\2\2\u0192\u0193\3\2\2\2\u0193<\3\2\2\2\u0194") + buf.write(u"\u0192\3\2\2\2\u0195\u0199\7?\2\2\u0196\u0197\7?\2\2") + 
buf.write(u"\u0197\u0199\7?\2\2\u0198\u0195\3\2\2\2\u0198\u0196\3") + buf.write(u"\2\2\2\u0199>\3\2\2\2\u019a\u019b\7#\2\2\u019b\u019f") + buf.write(u"\7?\2\2\u019c\u019d\7>\2\2\u019d\u019f\7@\2\2\u019e\u019a") + buf.write(u"\3\2\2\2\u019e\u019c\3\2\2\2\u019f@\3\2\2\2\u01a0\u01a1") + buf.write(u"\7>\2\2\u01a1B\3\2\2\2\u01a2\u01a3\7>\2\2\u01a3\u01a4") + buf.write(u"\7?\2\2\u01a4D\3\2\2\2\u01a5\u01a6\7@\2\2\u01a6F\3\2") + buf.write(u"\2\2\u01a7\u01a8\7@\2\2\u01a8\u01a9\7?\2\2\u01a9H\3\2") + buf.write(u"\2\2\u01aa\u01ab\7)\2\2\u01abJ\3\2\2\2\u01ac\u01ad\7") + buf.write(u"<\2\2\u01adL\3\2\2\2\u01ae\u01af\7\60\2\2\u01afN\3\2") + buf.write(u"\2\2\u01b0\u01b1\7.\2\2\u01b1P\3\2\2\2\u01b2\u01b3\7") + buf.write(u"+\2\2\u01b3R\3\2\2\2\u01b4\u01b5\7*\2\2\u01b5T\3\2\2") + buf.write(u"\2\u01b6\u01b7\7_\2\2\u01b7V\3\2\2\2\u01b8\u01b9\7]\2") + buf.write(u"\2\u01b9X\3\2\2\2\u01ba\u01bb\7-\2\2\u01bbZ\3\2\2\2\u01bc") + buf.write(u"\u01bd\5]/\2\u01bd\\\3\2\2\2\u01be\u01bf\7/\2\2\u01bf") + buf.write(u"^\3\2\2\2\u01c0\u01c1\7`\2\2\u01c1`\3\2\2\2\u01c2\u01c3") + buf.write(u"\7\61\2\2\u01c3b\3\2\2\2\u01c4\u01c5\7,\2\2\u01c5d\3") + buf.write(u"\2\2\2\u01c6\u01c7\t\r\2\2\u01c7f\3\2\2\2\u01c8\u01c9") + buf.write(u"\5e\63\2\u01c9\u01ca\5e\63\2\u01cah\3\2\2\2\u01cb\u01cc") + buf.write(u"\t\16\2\2\u01ccj\3\2\2\2\u01cd\u01cf\t\17\2\2\u01ce\u01cd") + buf.write(u"\3\2\2\2\u01cf\u01d0\3\2\2\2\u01d0\u01ce\3\2\2\2\u01d0") + buf.write(u"\u01d1\3\2\2\2\u01d1\u01d2\3\2\2\2\u01d2\u01d3\b\66\2") + buf.write(u"\2\u01d3l\3\2\2\2\u01d4\u01d5\7\61\2\2\u01d5\u01d6\7") + buf.write(u",\2\2\u01d6\u01da\3\2\2\2\u01d7\u01d9\13\2\2\2\u01d8") + buf.write(u"\u01d7\3\2\2\2\u01d9\u01dc\3\2\2\2\u01da\u01db\3\2\2") + buf.write(u"\2\u01da\u01d8\3\2\2\2\u01db\u01dd\3\2\2\2\u01dc\u01da") + buf.write(u"\3\2\2\2\u01dd\u01de\7,\2\2\u01de\u01df\7\61\2\2\u01df") + buf.write(u"\u01e0\3\2\2\2\u01e0\u01e1\b\67\2\2\u01e1n\3\2\2\2\u01e2") + buf.write(u"\u01e3\7\61\2\2\u01e3\u01e4\7\61\2\2\u01e4\u01e8\3\2") + 
buf.write(u"\2\2\u01e5\u01e7\n\20\2\2\u01e6\u01e5\3\2\2\2\u01e7\u01ea") + buf.write(u"\3\2\2\2\u01e8\u01e6\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9") + buf.write(u"\u01eb\3\2\2\2\u01ea\u01e8\3\2\2\2\u01eb\u01ec\b8\2\2") + buf.write(u"\u01ecp\3\2\2\2\u01ed\u01ee\13\2\2\2\u01eer\3\2\2\2 ") + buf.write(u"\2y|\177\u0086\u0089\u008f\u0096\u0099\u009e\u00a5\u00ac") + buf.write(u"\u00ba\u00ce\u00d8\u00da\u00e1\u00ee\u00f7\u00fe\u0108") + buf.write(u"\u010e\u0110\u018b\u0192\u0198\u019e\u01d0\u01da\u01e8") + buf.write(u"\3\b\2\2") return buf.getvalue() @@ -357,35 +307,35 @@ class STIXPatternLexer(Lexer): modeNames = [ u"DEFAULT_MODE" ] literalNames = [ u"<INVALID>", - u"'<'", u"'<='", u"'>'", u"'>='", u"'''", u"':'", u"'.'", u"','", - u"')'", u"'('", u"']'", u"'['", u"'+'", u"'-'", u"'^'", u"'/'", - u"'*'" ] + u"'AND'", u"'OR'", u"'NOT'", u"'FOLLOWEDBY'", u"'LIKE'", u"'MATCHES'", + u"'ISSUPERSET'", u"'ISSUBSET'", u"'LAST'", u"'IN'", u"'START'", + u"'STOP'", u"'SECONDS'", u"'true'", u"'false'", u"'WITHIN'", + u"'REPEATS'", u"'TIMES'", u"'<'", u"'<='", u"'>'", u"'>='", + u"'''", u"':'", u"'.'", u"','", u"')'", u"'('", u"']'", u"'['", + u"'+'", u"'-'", u"'^'", u"'/'", u"'*'" ] symbolicNames = [ u"<INVALID>", - u"IntNegLiteral", u"IntPosLiteral", u"FloatNegLiteral", u"FloatPosLiteral", - u"HexLiteral", u"BinaryLiteral", u"StringLiteral", u"BoolLiteral", - u"TimestampLiteral", u"AND", u"OR", u"NOT", u"FOLLOWEDBY", u"LIKE", - u"MATCHES", u"ISSUPERSET", u"ISSUBSET", u"LAST", u"IN", u"START", - u"STOP", u"SECONDS", u"TRUE", u"FALSE", u"WITHIN", u"REPEATS", - u"TIMES", u"IdentifierWithoutHyphen", u"IdentifierWithHyphen", - u"EQ", u"NEQ", u"LT", u"LE", u"GT", u"GE", u"QUOTE", u"COLON", - u"DOT", u"COMMA", u"RPAREN", u"LPAREN", u"RBRACK", u"LBRACK", - u"PLUS", u"HYPHEN", u"MINUS", u"POWER_OP", u"DIVIDE", u"ASTERISK", + u"IntNegLiteral", u"IntPosLiteral", u"FloatNegLiteral", u"FloatPosLiteral", + u"HexLiteral", u"BinaryLiteral", u"StringLiteral", u"BoolLiteral", + u"TimestampLiteral", 
u"AND", u"OR", u"NOT", u"FOLLOWEDBY", u"LIKE", + u"MATCHES", u"ISSUPERSET", u"ISSUBSET", u"LAST", u"IN", u"START", + u"STOP", u"SECONDS", u"TRUE", u"FALSE", u"WITHIN", u"REPEATS", + u"TIMES", u"IdentifierWithoutHyphen", u"IdentifierWithHyphen", + u"EQ", u"NEQ", u"LT", u"LE", u"GT", u"GE", u"QUOTE", u"COLON", + u"DOT", u"COMMA", u"RPAREN", u"LPAREN", u"RBRACK", u"LBRACK", + u"PLUS", u"HYPHEN", u"MINUS", u"POWER_OP", u"DIVIDE", u"ASTERISK", u"WS", u"COMMENT", u"LINE_COMMENT", u"InvalidCharacter" ] - ruleNames = [ u"IntNegLiteral", u"IntPosLiteral", u"FloatNegLiteral", - u"FloatPosLiteral", u"HexLiteral", u"BinaryLiteral", u"StringLiteral", - u"BoolLiteral", u"TimestampLiteral", u"AND", u"OR", u"NOT", - u"FOLLOWEDBY", u"LIKE", u"MATCHES", u"ISSUPERSET", u"ISSUBSET", - u"LAST", u"IN", u"START", u"STOP", u"SECONDS", u"TRUE", - u"FALSE", u"WITHIN", u"REPEATS", u"TIMES", u"IdentifierWithoutHyphen", - u"IdentifierWithHyphen", u"EQ", u"NEQ", u"LT", u"LE", - u"GT", u"GE", u"QUOTE", u"COLON", u"DOT", u"COMMA", u"RPAREN", - u"LPAREN", u"RBRACK", u"LBRACK", u"PLUS", u"HYPHEN", u"MINUS", - u"POWER_OP", u"DIVIDE", u"ASTERISK", u"A", u"B", u"C", - u"D", u"E", u"F", u"G", u"H", u"I", u"J", u"K", u"L", - u"M", u"N", u"O", u"P", u"Q", u"R", u"S", u"T", u"U", - u"V", u"W", u"X", u"Y", u"Z", u"HexDigit", u"TwoHexDigits", + ruleNames = [ u"IntNegLiteral", u"IntPosLiteral", u"FloatNegLiteral", + u"FloatPosLiteral", u"HexLiteral", u"BinaryLiteral", u"StringLiteral", + u"BoolLiteral", u"TimestampLiteral", u"AND", u"OR", u"NOT", + u"FOLLOWEDBY", u"LIKE", u"MATCHES", u"ISSUPERSET", u"ISSUBSET", + u"LAST", u"IN", u"START", u"STOP", u"SECONDS", u"TRUE", + u"FALSE", u"WITHIN", u"REPEATS", u"TIMES", u"IdentifierWithoutHyphen", + u"IdentifierWithHyphen", u"EQ", u"NEQ", u"LT", u"LE", + u"GT", u"GE", u"QUOTE", u"COLON", u"DOT", u"COMMA", u"RPAREN", + u"LPAREN", u"RBRACK", u"LBRACK", u"PLUS", u"HYPHEN", u"MINUS", + u"POWER_OP", u"DIVIDE", u"ASTERISK", u"HexDigit", u"TwoHexDigits", u"Base64Char", 
u"WS", u"COMMENT", u"LINE_COMMENT", u"InvalidCharacter" ] grammarFileName = u"STIXPattern.g4" @@ -396,3 +346,5 @@ class STIXPatternLexer(Lexer): self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) self._actions = None self._predicates = None + + diff --git a/stix2patterns/grammars/STIXPatternListener.py b/stix2patterns/grammars/STIXPatternListener.py index e7f88d4..4af4467 100644 --- a/stix2patterns/grammars/STIXPatternListener.py +++ b/stix2patterns/grammars/STIXPatternListener.py @@ -1,8 +1,6 @@ # Generated from STIXPattern.g4 by ANTLR 4.7.1 - from antlr4 import * - # This class defines a complete listener for a parse tree produced by STIXPatternParser. class STIXPatternListener(ParseTreeListener): @@ -283,3 +281,5 @@ class STIXPatternListener(ParseTreeListener): # Exit a parse tree produced by STIXPatternParser#orderableLiteral. def exitOrderableLiteral(self, ctx): pass + + diff --git a/stix2patterns/grammars/STIXPatternParser.py b/stix2patterns/grammars/STIXPatternParser.py index c4d3e6d..b74cc45 100644 --- a/stix2patterns/grammars/STIXPatternParser.py +++ b/stix2patterns/grammars/STIXPatternParser.py @@ -1,112 +1,109 @@ # Generated from STIXPattern.g4 by ANTLR 4.7.1 # encoding: utf-8 from __future__ import print_function - +from antlr4 import * from io import StringIO import sys -from antlr4 import * - - def serializedATN(): with StringIO() as buf: buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3") - buf.write(u"\67\u00e8\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7") + buf.write(u"\67\u00e9\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7") buf.write(u"\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t") buf.write(u"\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22") - buf.write(u"\4\23\t\23\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\7\3/\n\3\f") - buf.write(u"\3\16\3\62\13\3\3\4\3\4\3\4\3\4\3\4\3\4\7\4:\n\4\f\4") - buf.write(u"\16\4=\13\4\3\5\3\5\3\5\3\5\3\5\3\5\7\5E\n\5\f\5\16\5") - 
buf.write(u"H\13\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6S\n\6\3") - buf.write(u"\6\3\6\3\6\3\6\3\6\3\6\7\6[\n\6\f\6\16\6^\13\6\3\7\3") - buf.write(u"\7\3\7\3\7\3\7\3\7\7\7f\n\7\f\7\16\7i\13\7\3\b\3\b\3") - buf.write(u"\b\3\b\3\b\3\b\7\bq\n\b\f\b\16\bt\13\b\3\t\3\t\5\tx\n") - buf.write(u"\t\3\t\3\t\3\t\3\t\3\t\5\t\177\n\t\3\t\3\t\3\t\3\t\3") - buf.write(u"\t\5\t\u0086\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u008d\n\t\3") - buf.write(u"\t\3\t\3\t\3\t\3\t\5\t\u0094\n\t\3\t\3\t\3\t\3\t\3\t") - buf.write(u"\5\t\u009b\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u00a2\n\t\3\t") - buf.write(u"\3\t\3\t\3\t\3\t\3\t\3\t\5\t\u00ab\n\t\3\n\3\n\3\n\3") - buf.write(u"\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\r\3\r\3") - buf.write(u"\r\3\r\5\r\u00be\n\r\3\16\3\16\3\17\3\17\3\20\3\20\3") - buf.write(u"\20\3\20\3\20\3\20\5\20\u00ca\n\20\3\20\3\20\7\20\u00ce") - buf.write(u"\n\20\f\20\16\20\u00d1\13\20\3\21\3\21\3\21\3\21\3\21") - buf.write(u"\3\21\7\21\u00d9\n\21\f\21\16\21\u00dc\13\21\3\21\3\21") - buf.write(u"\5\21\u00e0\n\21\3\22\3\22\5\22\u00e4\n\22\3\23\3\23") - buf.write(u"\3\23\2\t\4\6\b\n\f\16\36\24\2\4\6\b\n\f\16\20\22\24") - buf.write(u"\26\30\32\34\36 \"$\2\t\3\2 !\3\2\"%\4\2\4\4\6\6\3\2") - buf.write(u"\36\37\4\2\t\t\36\36\4\2\3\4\63\63\4\2\3\t\13\13\2\u00f2") - buf.write(u"\2&\3\2\2\2\4(\3\2\2\2\6\63\3\2\2\2\b>\3\2\2\2\nR\3\2") - buf.write(u"\2\2\f_\3\2\2\2\16j\3\2\2\2\20\u00aa\3\2\2\2\22\u00ac") - buf.write(u"\3\2\2\2\24\u00b1\3\2\2\2\26\u00b5\3\2\2\2\30\u00b9\3") - buf.write(u"\2\2\2\32\u00bf\3\2\2\2\34\u00c1\3\2\2\2\36\u00c9\3\2") - buf.write(u"\2\2 \u00df\3\2\2\2\"\u00e3\3\2\2\2$\u00e5\3\2\2\2&\'") - buf.write(u"\5\4\3\2\'\3\3\2\2\2()\b\3\1\2)*\5\6\4\2*\60\3\2\2\2") - buf.write(u"+,\f\4\2\2,-\7\17\2\2-/\5\4\3\5.+\3\2\2\2/\62\3\2\2\2") - buf.write(u"\60.\3\2\2\2\60\61\3\2\2\2\61\5\3\2\2\2\62\60\3\2\2\2") - buf.write(u"\63\64\b\4\1\2\64\65\5\b\5\2\65;\3\2\2\2\66\67\f\4\2") - buf.write(u"\2\678\7\r\2\28:\5\6\4\59\66\3\2\2\2:=\3\2\2\2;9\3\2") - 
buf.write(u"\2\2;<\3\2\2\2<\7\3\2\2\2=;\3\2\2\2>?\b\5\1\2?@\5\n\6") - buf.write(u"\2@F\3\2\2\2AB\f\4\2\2BC\7\f\2\2CE\5\b\5\5DA\3\2\2\2") - buf.write(u"EH\3\2\2\2FD\3\2\2\2FG\3\2\2\2G\t\3\2\2\2HF\3\2\2\2I") - buf.write(u"J\b\6\1\2JK\7-\2\2KL\5\f\7\2LM\7,\2\2MS\3\2\2\2NO\7+") - buf.write(u"\2\2OP\5\4\3\2PQ\7*\2\2QS\3\2\2\2RI\3\2\2\2RN\3\2\2\2") - buf.write(u"S\\\3\2\2\2TU\f\5\2\2U[\5\22\n\2VW\f\4\2\2W[\5\24\13") - buf.write(u"\2XY\f\3\2\2Y[\5\26\f\2ZT\3\2\2\2ZV\3\2\2\2ZX\3\2\2\2") - buf.write(u"[^\3\2\2\2\\Z\3\2\2\2\\]\3\2\2\2]\13\3\2\2\2^\\\3\2\2") - buf.write(u"\2_`\b\7\1\2`a\5\16\b\2ag\3\2\2\2bc\f\4\2\2cd\7\r\2\2") - buf.write(u"df\5\f\7\5eb\3\2\2\2fi\3\2\2\2ge\3\2\2\2gh\3\2\2\2h\r") - buf.write(u"\3\2\2\2ig\3\2\2\2jk\b\b\1\2kl\5\20\t\2lr\3\2\2\2mn\f") - buf.write(u"\4\2\2no\7\f\2\2oq\5\16\b\5pm\3\2\2\2qt\3\2\2\2rp\3\2") - buf.write(u"\2\2rs\3\2\2\2s\17\3\2\2\2tr\3\2\2\2uw\5\30\r\2vx\7\16") - buf.write(u"\2\2wv\3\2\2\2wx\3\2\2\2xy\3\2\2\2yz\t\2\2\2z{\5\"\22") - buf.write(u"\2{\u00ab\3\2\2\2|~\5\30\r\2}\177\7\16\2\2~}\3\2\2\2") - buf.write(u"~\177\3\2\2\2\177\u0080\3\2\2\2\u0080\u0081\t\3\2\2\u0081") - buf.write(u"\u0082\5$\23\2\u0082\u00ab\3\2\2\2\u0083\u0085\5\30\r") - buf.write(u"\2\u0084\u0086\7\16\2\2\u0085\u0084\3\2\2\2\u0085\u0086") - buf.write(u"\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088\7\25\2\2\u0088") - buf.write(u"\u0089\5 \21\2\u0089\u00ab\3\2\2\2\u008a\u008c\5\30\r") - buf.write(u"\2\u008b\u008d\7\16\2\2\u008c\u008b\3\2\2\2\u008c\u008d") - buf.write(u"\3\2\2\2\u008d\u008e\3\2\2\2\u008e\u008f\7\20\2\2\u008f") - buf.write(u"\u0090\7\t\2\2\u0090\u00ab\3\2\2\2\u0091\u0093\5\30\r") - buf.write(u"\2\u0092\u0094\7\16\2\2\u0093\u0092\3\2\2\2\u0093\u0094") - buf.write(u"\3\2\2\2\u0094\u0095\3\2\2\2\u0095\u0096\7\21\2\2\u0096") - buf.write(u"\u0097\7\t\2\2\u0097\u00ab\3\2\2\2\u0098\u009a\5\30\r") - buf.write(u"\2\u0099\u009b\7\16\2\2\u009a\u0099\3\2\2\2\u009a\u009b") - buf.write(u"\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009d\7\23\2\2\u009d") - 
buf.write(u"\u009e\7\t\2\2\u009e\u00ab\3\2\2\2\u009f\u00a1\5\30\r") - buf.write(u"\2\u00a0\u00a2\7\16\2\2\u00a1\u00a0\3\2\2\2\u00a1\u00a2") - buf.write(u"\3\2\2\2\u00a2\u00a3\3\2\2\2\u00a3\u00a4\7\22\2\2\u00a4") - buf.write(u"\u00a5\7\t\2\2\u00a5\u00ab\3\2\2\2\u00a6\u00a7\7+\2\2") - buf.write(u"\u00a7\u00a8\5\f\7\2\u00a8\u00a9\7*\2\2\u00a9\u00ab\3") - buf.write(u"\2\2\2\u00aau\3\2\2\2\u00aa|\3\2\2\2\u00aa\u0083\3\2") - buf.write(u"\2\2\u00aa\u008a\3\2\2\2\u00aa\u0091\3\2\2\2\u00aa\u0098") - buf.write(u"\3\2\2\2\u00aa\u009f\3\2\2\2\u00aa\u00a6\3\2\2\2\u00ab") - buf.write(u"\21\3\2\2\2\u00ac\u00ad\7\26\2\2\u00ad\u00ae\7\t\2\2") - buf.write(u"\u00ae\u00af\7\27\2\2\u00af\u00b0\7\t\2\2\u00b0\23\3") - buf.write(u"\2\2\2\u00b1\u00b2\7\33\2\2\u00b2\u00b3\t\4\2\2\u00b3") - buf.write(u"\u00b4\7\30\2\2\u00b4\25\3\2\2\2\u00b5\u00b6\7\34\2\2") - buf.write(u"\u00b6\u00b7\7\4\2\2\u00b7\u00b8\7\35\2\2\u00b8\27\3") - buf.write(u"\2\2\2\u00b9\u00ba\5\32\16\2\u00ba\u00bb\7\'\2\2\u00bb") - buf.write(u"\u00bd\5\34\17\2\u00bc\u00be\5\36\20\2\u00bd\u00bc\3") - buf.write(u"\2\2\2\u00bd\u00be\3\2\2\2\u00be\31\3\2\2\2\u00bf\u00c0") - buf.write(u"\t\5\2\2\u00c0\33\3\2\2\2\u00c1\u00c2\t\6\2\2\u00c2\35") - buf.write(u"\3\2\2\2\u00c3\u00c4\b\20\1\2\u00c4\u00c5\7(\2\2\u00c5") - buf.write(u"\u00ca\t\6\2\2\u00c6\u00c7\7-\2\2\u00c7\u00c8\t\7\2\2") - buf.write(u"\u00c8\u00ca\7,\2\2\u00c9\u00c3\3\2\2\2\u00c9\u00c6\3") - buf.write(u"\2\2\2\u00ca\u00cf\3\2\2\2\u00cb\u00cc\f\5\2\2\u00cc") - buf.write(u"\u00ce\5\36\20\6\u00cd\u00cb\3\2\2\2\u00ce\u00d1\3\2") - buf.write(u"\2\2\u00cf\u00cd\3\2\2\2\u00cf\u00d0\3\2\2\2\u00d0\37") - buf.write(u"\3\2\2\2\u00d1\u00cf\3\2\2\2\u00d2\u00d3\7+\2\2\u00d3") - buf.write(u"\u00e0\7*\2\2\u00d4\u00d5\7+\2\2\u00d5\u00da\5\"\22\2") - buf.write(u"\u00d6\u00d7\7)\2\2\u00d7\u00d9\5\"\22\2\u00d8\u00d6") - buf.write(u"\3\2\2\2\u00d9\u00dc\3\2\2\2\u00da\u00d8\3\2\2\2\u00da") - buf.write(u"\u00db\3\2\2\2\u00db\u00dd\3\2\2\2\u00dc\u00da\3\2\2") - 
buf.write(u"\2\u00dd\u00de\7*\2\2\u00de\u00e0\3\2\2\2\u00df\u00d2") - buf.write(u"\3\2\2\2\u00df\u00d4\3\2\2\2\u00e0!\3\2\2\2\u00e1\u00e4") - buf.write(u"\5$\23\2\u00e2\u00e4\7\n\2\2\u00e3\u00e1\3\2\2\2\u00e3") - buf.write(u"\u00e2\3\2\2\2\u00e4#\3\2\2\2\u00e5\u00e6\t\b\2\2\u00e6") - buf.write(u"%\3\2\2\2\30\60;FRZ\\grw~\u0085\u008c\u0093\u009a\u00a1") - buf.write(u"\u00aa\u00bd\u00c9\u00cf\u00da\u00df\u00e3") + buf.write(u"\4\23\t\23\3\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\7\3\60") + buf.write(u"\n\3\f\3\16\3\63\13\3\3\4\3\4\3\4\3\4\3\4\3\4\7\4;\n") + buf.write(u"\4\f\4\16\4>\13\4\3\5\3\5\3\5\3\5\3\5\3\5\7\5F\n\5\f") + buf.write(u"\5\16\5I\13\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6") + buf.write(u"T\n\6\3\6\3\6\3\6\3\6\3\6\3\6\7\6\\\n\6\f\6\16\6_\13") + buf.write(u"\6\3\7\3\7\3\7\3\7\3\7\3\7\7\7g\n\7\f\7\16\7j\13\7\3") + buf.write(u"\b\3\b\3\b\3\b\3\b\3\b\7\br\n\b\f\b\16\bu\13\b\3\t\3") + buf.write(u"\t\5\ty\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u0080\n\t\3\t\3\t") + buf.write(u"\3\t\3\t\3\t\5\t\u0087\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u008e") + buf.write(u"\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u0095\n\t\3\t\3\t\3\t\3") + buf.write(u"\t\3\t\5\t\u009c\n\t\3\t\3\t\3\t\3\t\3\t\5\t\u00a3\n") + buf.write(u"\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\5\t\u00ac\n\t\3\n\3\n") + buf.write(u"\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\r") + buf.write(u"\3\r\3\r\3\r\5\r\u00bf\n\r\3\16\3\16\3\17\3\17\3\20\3") + buf.write(u"\20\3\20\3\20\3\20\3\20\5\20\u00cb\n\20\3\20\3\20\7\20") + buf.write(u"\u00cf\n\20\f\20\16\20\u00d2\13\20\3\21\3\21\3\21\3\21") + buf.write(u"\3\21\3\21\7\21\u00da\n\21\f\21\16\21\u00dd\13\21\3\21") + buf.write(u"\3\21\5\21\u00e1\n\21\3\22\3\22\5\22\u00e5\n\22\3\23") + buf.write(u"\3\23\3\23\2\t\4\6\b\n\f\16\36\24\2\4\6\b\n\f\16\20\22") + buf.write(u"\24\26\30\32\34\36 \"$\2\t\3\2 !\3\2\"%\4\2\4\4\6\6\3") + buf.write(u"\2\36\37\4\2\t\t\36\36\4\2\3\4\63\63\4\2\3\t\13\13\2") + buf.write(u"\u00f3\2&\3\2\2\2\4)\3\2\2\2\6\64\3\2\2\2\b?\3\2\2\2") + 
buf.write(u"\nS\3\2\2\2\f`\3\2\2\2\16k\3\2\2\2\20\u00ab\3\2\2\2\22") + buf.write(u"\u00ad\3\2\2\2\24\u00b2\3\2\2\2\26\u00b6\3\2\2\2\30\u00ba") + buf.write(u"\3\2\2\2\32\u00c0\3\2\2\2\34\u00c2\3\2\2\2\36\u00ca\3") + buf.write(u"\2\2\2 \u00e0\3\2\2\2\"\u00e4\3\2\2\2$\u00e6\3\2\2\2") + buf.write(u"&\'\5\4\3\2\'(\7\2\2\3(\3\3\2\2\2)*\b\3\1\2*+\5\6\4\2") + buf.write(u"+\61\3\2\2\2,-\f\4\2\2-.\7\17\2\2.\60\5\4\3\5/,\3\2\2") + buf.write(u"\2\60\63\3\2\2\2\61/\3\2\2\2\61\62\3\2\2\2\62\5\3\2\2") + buf.write(u"\2\63\61\3\2\2\2\64\65\b\4\1\2\65\66\5\b\5\2\66<\3\2") + buf.write(u"\2\2\678\f\4\2\289\7\r\2\29;\5\6\4\5:\67\3\2\2\2;>\3") + buf.write(u"\2\2\2<:\3\2\2\2<=\3\2\2\2=\7\3\2\2\2><\3\2\2\2?@\b\5") + buf.write(u"\1\2@A\5\n\6\2AG\3\2\2\2BC\f\4\2\2CD\7\f\2\2DF\5\b\5") + buf.write(u"\5EB\3\2\2\2FI\3\2\2\2GE\3\2\2\2GH\3\2\2\2H\t\3\2\2\2") + buf.write(u"IG\3\2\2\2JK\b\6\1\2KL\7-\2\2LM\5\f\7\2MN\7,\2\2NT\3") + buf.write(u"\2\2\2OP\7+\2\2PQ\5\4\3\2QR\7*\2\2RT\3\2\2\2SJ\3\2\2") + buf.write(u"\2SO\3\2\2\2T]\3\2\2\2UV\f\5\2\2V\\\5\22\n\2WX\f\4\2") + buf.write(u"\2X\\\5\24\13\2YZ\f\3\2\2Z\\\5\26\f\2[U\3\2\2\2[W\3\2") + buf.write(u"\2\2[Y\3\2\2\2\\_\3\2\2\2][\3\2\2\2]^\3\2\2\2^\13\3\2") + buf.write(u"\2\2_]\3\2\2\2`a\b\7\1\2ab\5\16\b\2bh\3\2\2\2cd\f\4\2") + buf.write(u"\2de\7\r\2\2eg\5\f\7\5fc\3\2\2\2gj\3\2\2\2hf\3\2\2\2") + buf.write(u"hi\3\2\2\2i\r\3\2\2\2jh\3\2\2\2kl\b\b\1\2lm\5\20\t\2") + buf.write(u"ms\3\2\2\2no\f\4\2\2op\7\f\2\2pr\5\16\b\5qn\3\2\2\2r") + buf.write(u"u\3\2\2\2sq\3\2\2\2st\3\2\2\2t\17\3\2\2\2us\3\2\2\2v") + buf.write(u"x\5\30\r\2wy\7\16\2\2xw\3\2\2\2xy\3\2\2\2yz\3\2\2\2z") + buf.write(u"{\t\2\2\2{|\5\"\22\2|\u00ac\3\2\2\2}\177\5\30\r\2~\u0080") + buf.write(u"\7\16\2\2\177~\3\2\2\2\177\u0080\3\2\2\2\u0080\u0081") + buf.write(u"\3\2\2\2\u0081\u0082\t\3\2\2\u0082\u0083\5$\23\2\u0083") + buf.write(u"\u00ac\3\2\2\2\u0084\u0086\5\30\r\2\u0085\u0087\7\16") + buf.write(u"\2\2\u0086\u0085\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088") + 
buf.write(u"\3\2\2\2\u0088\u0089\7\25\2\2\u0089\u008a\5 \21\2\u008a") + buf.write(u"\u00ac\3\2\2\2\u008b\u008d\5\30\r\2\u008c\u008e\7\16") + buf.write(u"\2\2\u008d\u008c\3\2\2\2\u008d\u008e\3\2\2\2\u008e\u008f") + buf.write(u"\3\2\2\2\u008f\u0090\7\20\2\2\u0090\u0091\7\t\2\2\u0091") + buf.write(u"\u00ac\3\2\2\2\u0092\u0094\5\30\r\2\u0093\u0095\7\16") + buf.write(u"\2\2\u0094\u0093\3\2\2\2\u0094\u0095\3\2\2\2\u0095\u0096") + buf.write(u"\3\2\2\2\u0096\u0097\7\21\2\2\u0097\u0098\7\t\2\2\u0098") + buf.write(u"\u00ac\3\2\2\2\u0099\u009b\5\30\r\2\u009a\u009c\7\16") + buf.write(u"\2\2\u009b\u009a\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009d") + buf.write(u"\3\2\2\2\u009d\u009e\7\23\2\2\u009e\u009f\7\t\2\2\u009f") + buf.write(u"\u00ac\3\2\2\2\u00a0\u00a2\5\30\r\2\u00a1\u00a3\7\16") + buf.write(u"\2\2\u00a2\u00a1\3\2\2\2\u00a2\u00a3\3\2\2\2\u00a3\u00a4") + buf.write(u"\3\2\2\2\u00a4\u00a5\7\22\2\2\u00a5\u00a6\7\t\2\2\u00a6") + buf.write(u"\u00ac\3\2\2\2\u00a7\u00a8\7+\2\2\u00a8\u00a9\5\f\7\2") + buf.write(u"\u00a9\u00aa\7*\2\2\u00aa\u00ac\3\2\2\2\u00abv\3\2\2") + buf.write(u"\2\u00ab}\3\2\2\2\u00ab\u0084\3\2\2\2\u00ab\u008b\3\2") + buf.write(u"\2\2\u00ab\u0092\3\2\2\2\u00ab\u0099\3\2\2\2\u00ab\u00a0") + buf.write(u"\3\2\2\2\u00ab\u00a7\3\2\2\2\u00ac\21\3\2\2\2\u00ad\u00ae") + buf.write(u"\7\26\2\2\u00ae\u00af\7\t\2\2\u00af\u00b0\7\27\2\2\u00b0") + buf.write(u"\u00b1\7\t\2\2\u00b1\23\3\2\2\2\u00b2\u00b3\7\33\2\2") + buf.write(u"\u00b3\u00b4\t\4\2\2\u00b4\u00b5\7\30\2\2\u00b5\25\3") + buf.write(u"\2\2\2\u00b6\u00b7\7\34\2\2\u00b7\u00b8\7\4\2\2\u00b8") + buf.write(u"\u00b9\7\35\2\2\u00b9\27\3\2\2\2\u00ba\u00bb\5\32\16") + buf.write(u"\2\u00bb\u00bc\7\'\2\2\u00bc\u00be\5\34\17\2\u00bd\u00bf") + buf.write(u"\5\36\20\2\u00be\u00bd\3\2\2\2\u00be\u00bf\3\2\2\2\u00bf") + buf.write(u"\31\3\2\2\2\u00c0\u00c1\t\5\2\2\u00c1\33\3\2\2\2\u00c2") + buf.write(u"\u00c3\t\6\2\2\u00c3\35\3\2\2\2\u00c4\u00c5\b\20\1\2") + buf.write(u"\u00c5\u00c6\7(\2\2\u00c6\u00cb\t\6\2\2\u00c7\u00c8\7") + 
buf.write(u"-\2\2\u00c8\u00c9\t\7\2\2\u00c9\u00cb\7,\2\2\u00ca\u00c4") + buf.write(u"\3\2\2\2\u00ca\u00c7\3\2\2\2\u00cb\u00d0\3\2\2\2\u00cc") + buf.write(u"\u00cd\f\5\2\2\u00cd\u00cf\5\36\20\6\u00ce\u00cc\3\2") + buf.write(u"\2\2\u00cf\u00d2\3\2\2\2\u00d0\u00ce\3\2\2\2\u00d0\u00d1") + buf.write(u"\3\2\2\2\u00d1\37\3\2\2\2\u00d2\u00d0\3\2\2\2\u00d3\u00d4") + buf.write(u"\7+\2\2\u00d4\u00e1\7*\2\2\u00d5\u00d6\7+\2\2\u00d6\u00db") + buf.write(u"\5\"\22\2\u00d7\u00d8\7)\2\2\u00d8\u00da\5\"\22\2\u00d9") + buf.write(u"\u00d7\3\2\2\2\u00da\u00dd\3\2\2\2\u00db\u00d9\3\2\2") + buf.write(u"\2\u00db\u00dc\3\2\2\2\u00dc\u00de\3\2\2\2\u00dd\u00db") + buf.write(u"\3\2\2\2\u00de\u00df\7*\2\2\u00df\u00e1\3\2\2\2\u00e0") + buf.write(u"\u00d3\3\2\2\2\u00e0\u00d5\3\2\2\2\u00e1!\3\2\2\2\u00e2") + buf.write(u"\u00e5\5$\23\2\u00e3\u00e5\7\n\2\2\u00e4\u00e2\3\2\2") + buf.write(u"\2\u00e4\u00e3\3\2\2\2\u00e5#\3\2\2\2\u00e6\u00e7\t\b") + buf.write(u"\2\2\u00e7%\3\2\2\2\30\61<GS[]hsx\177\u0086\u008d\u0094") + buf.write(u"\u009b\u00a2\u00ab\u00be\u00ca\u00d0\u00db\u00e0\u00e4") return buf.getvalue() @@ -120,29 +117,28 @@ class STIXPatternParser ( Parser ): sharedContextCache = PredictionContextCache() - literalNames = [ u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", - u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", - u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", - u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", - u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", - u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", - u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", - u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", - u"'<'", u"'<='", u"'>'", u"'>='", u"'''", u"':'", u"'.'", - u"','", u"')'", u"'('", u"']'", u"'['", u"'+'", u"<INVALID>", - u"'-'", u"'^'", u"'/'", u"'*'" ] - - symbolicNames = [ u"<INVALID>", u"IntNegLiteral", u"IntPosLiteral", - u"FloatNegLiteral", u"FloatPosLiteral", u"HexLiteral", - u"BinaryLiteral", 
u"StringLiteral", u"BoolLiteral", - u"TimestampLiteral", u"AND", u"OR", u"NOT", u"FOLLOWEDBY", - u"LIKE", u"MATCHES", u"ISSUPERSET", u"ISSUBSET", u"LAST", - u"IN", u"START", u"STOP", u"SECONDS", u"TRUE", u"FALSE", - u"WITHIN", u"REPEATS", u"TIMES", u"IdentifierWithoutHyphen", - u"IdentifierWithHyphen", u"EQ", u"NEQ", u"LT", u"LE", - u"GT", u"GE", u"QUOTE", u"COLON", u"DOT", u"COMMA", - u"RPAREN", u"LPAREN", u"RBRACK", u"LBRACK", u"PLUS", - u"HYPHEN", u"MINUS", u"POWER_OP", u"DIVIDE", u"ASTERISK", + literalNames = [ u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", + u"<INVALID>", u"<INVALID>", u"<INVALID>", u"<INVALID>", + u"<INVALID>", u"<INVALID>", u"'AND'", u"'OR'", u"'NOT'", + u"'FOLLOWEDBY'", u"'LIKE'", u"'MATCHES'", u"'ISSUPERSET'", + u"'ISSUBSET'", u"'LAST'", u"'IN'", u"'START'", u"'STOP'", + u"'SECONDS'", u"'true'", u"'false'", u"'WITHIN'", u"'REPEATS'", + u"'TIMES'", u"<INVALID>", u"<INVALID>", u"<INVALID>", + u"<INVALID>", u"'<'", u"'<='", u"'>'", u"'>='", u"'''", + u"':'", u"'.'", u"','", u"')'", u"'('", u"']'", u"'['", + u"'+'", u"<INVALID>", u"'-'", u"'^'", u"'/'", u"'*'" ] + + symbolicNames = [ u"<INVALID>", u"IntNegLiteral", u"IntPosLiteral", + u"FloatNegLiteral", u"FloatPosLiteral", u"HexLiteral", + u"BinaryLiteral", u"StringLiteral", u"BoolLiteral", + u"TimestampLiteral", u"AND", u"OR", u"NOT", u"FOLLOWEDBY", + u"LIKE", u"MATCHES", u"ISSUPERSET", u"ISSUBSET", u"LAST", + u"IN", u"START", u"STOP", u"SECONDS", u"TRUE", u"FALSE", + u"WITHIN", u"REPEATS", u"TIMES", u"IdentifierWithoutHyphen", + u"IdentifierWithHyphen", u"EQ", u"NEQ", u"LT", u"LE", + u"GT", u"GE", u"QUOTE", u"COLON", u"DOT", u"COMMA", + u"RPAREN", u"LPAREN", u"RBRACK", u"LBRACK", u"PLUS", + u"HYPHEN", u"MINUS", u"POWER_OP", u"DIVIDE", u"ASTERISK", u"WS", u"COMMENT", u"LINE_COMMENT", u"InvalidCharacter" ] RULE_pattern = 0 @@ -164,12 +160,12 @@ class STIXPatternParser ( Parser ): RULE_primitiveLiteral = 16 RULE_orderableLiteral = 17 - ruleNames = [ u"pattern", 
u"observationExpressions", u"observationExpressionOr", - u"observationExpressionAnd", u"observationExpression", - u"comparisonExpression", u"comparisonExpressionAnd", - u"propTest", u"startStopQualifier", u"withinQualifier", - u"repeatedQualifier", u"objectPath", u"objectType", u"firstPathComponent", - u"objectPathComponent", u"setLiteral", u"primitiveLiteral", + ruleNames = [ u"pattern", u"observationExpressions", u"observationExpressionOr", + u"observationExpressionAnd", u"observationExpression", + u"comparisonExpression", u"comparisonExpressionAnd", + u"propTest", u"startStopQualifier", u"withinQualifier", + u"repeatedQualifier", u"objectPath", u"objectType", u"firstPathComponent", + u"objectPathComponent", u"setLiteral", u"primitiveLiteral", u"orderableLiteral" ] EOF = Token.EOF @@ -245,6 +241,9 @@ class STIXPatternParser ( Parser ): return self.getTypedRuleContext(STIXPatternParser.ObservationExpressionsContext,0) + def EOF(self): + return self.getToken(STIXPatternParser.EOF, 0) + def getRuleIndex(self): return STIXPatternParser.RULE_pattern @@ -267,6 +266,8 @@ class STIXPatternParser ( Parser ): self.enterOuterAlt(localctx, 1) self.state = 36 self.observationExpressions(0) + self.state = 37 + self.match(STIXPatternParser.EOF) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) @@ -317,10 +318,10 @@ class STIXPatternParser ( Parser ): self.enterRecursionRule(localctx, 2, self.RULE_observationExpressions, _p) try: self.enterOuterAlt(localctx, 1) - self.state = 39 + self.state = 40 self.observationExpressionOr(0) self._ctx.stop = self._input.LT(-1) - self.state = 46 + self.state = 47 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,0,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: @@ -330,15 +331,15 @@ class STIXPatternParser ( Parser ): _prevctx = localctx localctx = STIXPatternParser.ObservationExpressionsContext(self, _parentctx, _parentState) 
self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpressions) - self.state = 41 + self.state = 42 if not self.precpred(self._ctx, 2): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 2)") - self.state = 42 - self.match(STIXPatternParser.FOLLOWEDBY) self.state = 43 - self.observationExpressions(3) - self.state = 48 + self.match(STIXPatternParser.FOLLOWEDBY) + self.state = 44 + self.observationExpressions(3) + self.state = 49 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,0,self._ctx) @@ -392,10 +393,10 @@ class STIXPatternParser ( Parser ): self.enterRecursionRule(localctx, 4, self.RULE_observationExpressionOr, _p) try: self.enterOuterAlt(localctx, 1) - self.state = 50 + self.state = 51 self.observationExpressionAnd(0) self._ctx.stop = self._input.LT(-1) - self.state = 57 + self.state = 58 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,1,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: @@ -405,15 +406,15 @@ class STIXPatternParser ( Parser ): _prevctx = localctx localctx = STIXPatternParser.ObservationExpressionOrContext(self, _parentctx, _parentState) self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpressionOr) - self.state = 52 + self.state = 53 if not self.precpred(self._ctx, 2): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 2)") - self.state = 53 - self.match(STIXPatternParser.OR) self.state = 54 - self.observationExpressionOr(3) - self.state = 59 + self.match(STIXPatternParser.OR) + self.state = 55 + self.observationExpressionOr(3) + self.state = 60 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,1,self._ctx) @@ -467,10 +468,10 @@ class STIXPatternParser ( Parser ): self.enterRecursionRule(localctx, 6, self.RULE_observationExpressionAnd, _p) try: self.enterOuterAlt(localctx, 
1) - self.state = 61 + self.state = 62 self.observationExpression(0) self._ctx.stop = self._input.LT(-1) - self.state = 68 + self.state = 69 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,2,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: @@ -480,15 +481,15 @@ class STIXPatternParser ( Parser ): _prevctx = localctx localctx = STIXPatternParser.ObservationExpressionAndContext(self, _parentctx, _parentState) self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpressionAnd) - self.state = 63 + self.state = 64 if not self.precpred(self._ctx, 2): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 2)") - self.state = 64 - self.match(STIXPatternParser.AND) self.state = 65 - self.observationExpressionAnd(3) - self.state = 70 + self.match(STIXPatternParser.AND) + self.state = 66 + self.observationExpressionAnd(3) + self.state = 71 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,2,self._ctx) @@ -510,7 +511,7 @@ class STIXPatternParser ( Parser ): def getRuleIndex(self): return STIXPatternParser.RULE_observationExpression - + def copyFrom(self, ctx): super(STIXPatternParser.ObservationExpressionContext, self).copyFrom(ctx) @@ -637,7 +638,7 @@ class STIXPatternParser ( Parser ): self.enterRecursionRule(localctx, 8, self.RULE_observationExpression, _p) try: self.enterOuterAlt(localctx, 1) - self.state = 80 + self.state = 81 self._errHandler.sync(self) token = self._input.LA(1) if token in [STIXPatternParser.LBRACK]: @@ -645,29 +646,29 @@ class STIXPatternParser ( Parser ): self._ctx = localctx _prevctx = localctx - self.state = 72 - self.match(STIXPatternParser.LBRACK) self.state = 73 - self.comparisonExpression(0) + self.match(STIXPatternParser.LBRACK) self.state = 74 + self.comparisonExpression(0) + self.state = 75 self.match(STIXPatternParser.RBRACK) pass elif token in [STIXPatternParser.LPAREN]: localctx = 
STIXPatternParser.ObservationExpressionCompoundContext(self, localctx) self._ctx = localctx _prevctx = localctx - self.state = 76 - self.match(STIXPatternParser.LPAREN) self.state = 77 - self.observationExpressions(0) + self.match(STIXPatternParser.LPAREN) self.state = 78 + self.observationExpressions(0) + self.state = 79 self.match(STIXPatternParser.RPAREN) pass else: raise NoViableAltException(self) self._ctx.stop = self._input.LT(-1) - self.state = 90 + self.state = 91 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,5,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: @@ -675,44 +676,44 @@ class STIXPatternParser ( Parser ): if self._parseListeners is not None: self.triggerExitRuleEvent() _prevctx = localctx - self.state = 88 + self.state = 89 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,4,self._ctx) if la_ == 1: localctx = STIXPatternParser.ObservationExpressionStartStopContext(self, STIXPatternParser.ObservationExpressionContext(self, _parentctx, _parentState)) self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpression) - self.state = 82 + self.state = 83 if not self.precpred(self._ctx, 3): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 3)") - self.state = 83 + self.state = 84 self.startStopQualifier() pass elif la_ == 2: localctx = STIXPatternParser.ObservationExpressionWithinContext(self, STIXPatternParser.ObservationExpressionContext(self, _parentctx, _parentState)) self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpression) - self.state = 84 + self.state = 85 if not self.precpred(self._ctx, 2): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 2)") - self.state = 85 + self.state = 86 self.withinQualifier() pass elif la_ == 3: localctx = STIXPatternParser.ObservationExpressionRepeatedContext(self, 
STIXPatternParser.ObservationExpressionContext(self, _parentctx, _parentState)) self.pushNewRecursionContext(localctx, _startState, self.RULE_observationExpression) - self.state = 86 + self.state = 87 if not self.precpred(self._ctx, 1): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 1)") - self.state = 87 + self.state = 88 self.repeatedQualifier() pass - - self.state = 92 + + self.state = 93 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,5,self._ctx) @@ -766,10 +767,10 @@ class STIXPatternParser ( Parser ): self.enterRecursionRule(localctx, 10, self.RULE_comparisonExpression, _p) try: self.enterOuterAlt(localctx, 1) - self.state = 94 + self.state = 95 self.comparisonExpressionAnd(0) self._ctx.stop = self._input.LT(-1) - self.state = 101 + self.state = 102 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,6,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: @@ -779,15 +780,15 @@ class STIXPatternParser ( Parser ): _prevctx = localctx localctx = STIXPatternParser.ComparisonExpressionContext(self, _parentctx, _parentState) self.pushNewRecursionContext(localctx, _startState, self.RULE_comparisonExpression) - self.state = 96 + self.state = 97 if not self.precpred(self._ctx, 2): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 2)") - self.state = 97 - self.match(STIXPatternParser.OR) self.state = 98 - self.comparisonExpression(3) - self.state = 103 + self.match(STIXPatternParser.OR) + self.state = 99 + self.comparisonExpression(3) + self.state = 104 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,6,self._ctx) @@ -841,10 +842,10 @@ class STIXPatternParser ( Parser ): self.enterRecursionRule(localctx, 12, self.RULE_comparisonExpressionAnd, _p) try: self.enterOuterAlt(localctx, 1) - self.state = 105 + self.state = 106 self.propTest() 
self._ctx.stop = self._input.LT(-1) - self.state = 112 + self.state = 113 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,7,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: @@ -854,15 +855,15 @@ class STIXPatternParser ( Parser ): _prevctx = localctx localctx = STIXPatternParser.ComparisonExpressionAndContext(self, _parentctx, _parentState) self.pushNewRecursionContext(localctx, _startState, self.RULE_comparisonExpressionAnd) - self.state = 107 + self.state = 108 if not self.precpred(self._ctx, 2): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 2)") - self.state = 108 - self.match(STIXPatternParser.AND) self.state = 109 - self.comparisonExpressionAnd(3) - self.state = 114 + self.match(STIXPatternParser.AND) + self.state = 110 + self.comparisonExpressionAnd(3) + self.state = 115 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,7,self._ctx) @@ -884,7 +885,7 @@ class STIXPatternParser ( Parser ): def getRuleIndex(self): return STIXPatternParser.RULE_propTest - + def copyFrom(self, ctx): super(STIXPatternParser.PropTestContext, self).copyFrom(ctx) @@ -1106,160 +1107,160 @@ class STIXPatternParser ( Parser ): self.enterRule(localctx, 14, self.RULE_propTest) self._la = 0 # Token type try: - self.state = 168 + self.state = 169 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,15,self._ctx) if la_ == 1: localctx = STIXPatternParser.PropTestEqualContext(self, localctx) self.enterOuterAlt(localctx, 1) - self.state = 115 + self.state = 116 self.objectPath() - self.state = 117 + self.state = 118 self._errHandler.sync(self) _la = self._input.LA(1) if _la==STIXPatternParser.NOT: - self.state = 116 + self.state = 117 self.match(STIXPatternParser.NOT) - self.state = 119 + self.state = 120 _la = self._input.LA(1) if not(_la==STIXPatternParser.EQ or _la==STIXPatternParser.NEQ): self._errHandler.recoverInline(self) else: 
self._errHandler.reportMatch(self) self.consume() - self.state = 120 + self.state = 121 self.primitiveLiteral() pass elif la_ == 2: localctx = STIXPatternParser.PropTestOrderContext(self, localctx) self.enterOuterAlt(localctx, 2) - self.state = 122 + self.state = 123 self.objectPath() - self.state = 124 + self.state = 125 self._errHandler.sync(self) _la = self._input.LA(1) if _la==STIXPatternParser.NOT: - self.state = 123 + self.state = 124 self.match(STIXPatternParser.NOT) - self.state = 126 + self.state = 127 _la = self._input.LA(1) if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << STIXPatternParser.LT) | (1 << STIXPatternParser.LE) | (1 << STIXPatternParser.GT) | (1 << STIXPatternParser.GE))) != 0)): self._errHandler.recoverInline(self) else: self._errHandler.reportMatch(self) self.consume() - self.state = 127 + self.state = 128 self.orderableLiteral() pass elif la_ == 3: localctx = STIXPatternParser.PropTestSetContext(self, localctx) self.enterOuterAlt(localctx, 3) - self.state = 129 + self.state = 130 self.objectPath() - self.state = 131 + self.state = 132 self._errHandler.sync(self) _la = self._input.LA(1) if _la==STIXPatternParser.NOT: - self.state = 130 + self.state = 131 self.match(STIXPatternParser.NOT) - self.state = 133 - self.match(STIXPatternParser.IN) self.state = 134 + self.match(STIXPatternParser.IN) + self.state = 135 self.setLiteral() pass elif la_ == 4: localctx = STIXPatternParser.PropTestLikeContext(self, localctx) self.enterOuterAlt(localctx, 4) - self.state = 136 + self.state = 137 self.objectPath() - self.state = 138 + self.state = 139 self._errHandler.sync(self) _la = self._input.LA(1) if _la==STIXPatternParser.NOT: - self.state = 137 + self.state = 138 self.match(STIXPatternParser.NOT) - self.state = 140 - self.match(STIXPatternParser.LIKE) self.state = 141 + self.match(STIXPatternParser.LIKE) + self.state = 142 self.match(STIXPatternParser.StringLiteral) pass elif la_ == 5: localctx = STIXPatternParser.PropTestRegexContext(self, 
localctx) self.enterOuterAlt(localctx, 5) - self.state = 143 + self.state = 144 self.objectPath() - self.state = 145 + self.state = 146 self._errHandler.sync(self) _la = self._input.LA(1) if _la==STIXPatternParser.NOT: - self.state = 144 + self.state = 145 self.match(STIXPatternParser.NOT) - self.state = 147 - self.match(STIXPatternParser.MATCHES) self.state = 148 + self.match(STIXPatternParser.MATCHES) + self.state = 149 self.match(STIXPatternParser.StringLiteral) pass elif la_ == 6: localctx = STIXPatternParser.PropTestIsSubsetContext(self, localctx) self.enterOuterAlt(localctx, 6) - self.state = 150 + self.state = 151 self.objectPath() - self.state = 152 + self.state = 153 self._errHandler.sync(self) _la = self._input.LA(1) if _la==STIXPatternParser.NOT: - self.state = 151 + self.state = 152 self.match(STIXPatternParser.NOT) - self.state = 154 - self.match(STIXPatternParser.ISSUBSET) self.state = 155 + self.match(STIXPatternParser.ISSUBSET) + self.state = 156 self.match(STIXPatternParser.StringLiteral) pass elif la_ == 7: localctx = STIXPatternParser.PropTestIsSupersetContext(self, localctx) self.enterOuterAlt(localctx, 7) - self.state = 157 + self.state = 158 self.objectPath() - self.state = 159 + self.state = 160 self._errHandler.sync(self) _la = self._input.LA(1) if _la==STIXPatternParser.NOT: - self.state = 158 + self.state = 159 self.match(STIXPatternParser.NOT) - self.state = 161 - self.match(STIXPatternParser.ISSUPERSET) self.state = 162 + self.match(STIXPatternParser.ISSUPERSET) + self.state = 163 self.match(STIXPatternParser.StringLiteral) pass elif la_ == 8: localctx = STIXPatternParser.PropTestParenContext(self, localctx) self.enterOuterAlt(localctx, 8) - self.state = 164 - self.match(STIXPatternParser.LPAREN) self.state = 165 - self.comparisonExpression(0) + self.match(STIXPatternParser.LPAREN) self.state = 166 + self.comparisonExpression(0) + self.state = 167 self.match(STIXPatternParser.RPAREN) pass @@ -1310,13 +1311,13 @@ class STIXPatternParser ( 
Parser ): self.enterRule(localctx, 16, self.RULE_startStopQualifier) try: self.enterOuterAlt(localctx, 1) - self.state = 170 - self.match(STIXPatternParser.START) self.state = 171 - self.match(STIXPatternParser.StringLiteral) + self.match(STIXPatternParser.START) self.state = 172 - self.match(STIXPatternParser.STOP) + self.match(STIXPatternParser.StringLiteral) self.state = 173 + self.match(STIXPatternParser.STOP) + self.state = 174 self.match(STIXPatternParser.StringLiteral) except RecognitionException as re: localctx.exception = re @@ -1365,16 +1366,16 @@ class STIXPatternParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 175 - self.match(STIXPatternParser.WITHIN) self.state = 176 + self.match(STIXPatternParser.WITHIN) + self.state = 177 _la = self._input.LA(1) if not(_la==STIXPatternParser.IntPosLiteral or _la==STIXPatternParser.FloatPosLiteral): self._errHandler.recoverInline(self) else: self._errHandler.reportMatch(self) self.consume() - self.state = 177 + self.state = 178 self.match(STIXPatternParser.SECONDS) except RecognitionException as re: localctx.exception = re @@ -1419,11 +1420,11 @@ class STIXPatternParser ( Parser ): self.enterRule(localctx, 20, self.RULE_repeatedQualifier) try: self.enterOuterAlt(localctx, 1) - self.state = 179 - self.match(STIXPatternParser.REPEATS) self.state = 180 - self.match(STIXPatternParser.IntPosLiteral) + self.match(STIXPatternParser.REPEATS) self.state = 181 + self.match(STIXPatternParser.IntPosLiteral) + self.state = 182 self.match(STIXPatternParser.TIMES) except RecognitionException as re: localctx.exception = re @@ -1475,17 +1476,17 @@ class STIXPatternParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 183 - self.objectType() self.state = 184 - self.match(STIXPatternParser.COLON) + self.objectType() self.state = 185 + self.match(STIXPatternParser.COLON) + self.state = 186 self.firstPathComponent() - self.state = 187 + self.state 
= 188 self._errHandler.sync(self) _la = self._input.LA(1) if _la==STIXPatternParser.DOT or _la==STIXPatternParser.LBRACK: - self.state = 186 + self.state = 187 self.objectPathComponent(0) @@ -1530,7 +1531,7 @@ class STIXPatternParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 189 + self.state = 190 _la = self._input.LA(1) if not(_la==STIXPatternParser.IdentifierWithoutHyphen or _la==STIXPatternParser.IdentifierWithHyphen): self._errHandler.recoverInline(self) @@ -1578,7 +1579,7 @@ class STIXPatternParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 191 + self.state = 192 _la = self._input.LA(1) if not(_la==STIXPatternParser.StringLiteral or _la==STIXPatternParser.IdentifierWithoutHyphen): self._errHandler.recoverInline(self) @@ -1603,7 +1604,7 @@ class STIXPatternParser ( Parser ): def getRuleIndex(self): return STIXPatternParser.RULE_objectPathComponent - + def copyFrom(self, ctx): super(STIXPatternParser.ObjectPathComponentContext, self).copyFrom(ctx) @@ -1687,7 +1688,7 @@ class STIXPatternParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 199 + self.state = 200 self._errHandler.sync(self) token = self._input.LA(1) if token in [STIXPatternParser.DOT]: @@ -1695,9 +1696,9 @@ class STIXPatternParser ( Parser ): self._ctx = localctx _prevctx = localctx - self.state = 194 - self.match(STIXPatternParser.DOT) self.state = 195 + self.match(STIXPatternParser.DOT) + self.state = 196 _la = self._input.LA(1) if not(_la==STIXPatternParser.StringLiteral or _la==STIXPatternParser.IdentifierWithoutHyphen): self._errHandler.recoverInline(self) @@ -1709,23 +1710,23 @@ class STIXPatternParser ( Parser ): localctx = STIXPatternParser.IndexPathStepContext(self, localctx) self._ctx = localctx _prevctx = localctx - self.state = 196 - self.match(STIXPatternParser.LBRACK) self.state = 197 + self.match(STIXPatternParser.LBRACK) + self.state = 198 _la = 
self._input.LA(1) if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << STIXPatternParser.IntNegLiteral) | (1 << STIXPatternParser.IntPosLiteral) | (1 << STIXPatternParser.ASTERISK))) != 0)): self._errHandler.recoverInline(self) else: self._errHandler.reportMatch(self) self.consume() - self.state = 198 + self.state = 199 self.match(STIXPatternParser.RBRACK) pass else: raise NoViableAltException(self) self._ctx.stop = self._input.LT(-1) - self.state = 205 + self.state = 206 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,18,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: @@ -1735,13 +1736,13 @@ class STIXPatternParser ( Parser ): _prevctx = localctx localctx = STIXPatternParser.PathStepContext(self, STIXPatternParser.ObjectPathComponentContext(self, _parentctx, _parentState)) self.pushNewRecursionContext(localctx, _startState, self.RULE_objectPathComponent) - self.state = 201 + self.state = 202 if not self.precpred(self._ctx, 3): from antlr4.error.Errors import FailedPredicateException raise FailedPredicateException(self, "self.precpred(self._ctx, 3)") - self.state = 202 - self.objectPathComponent(4) - self.state = 207 + self.state = 203 + self.objectPathComponent(4) + self.state = 208 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,18,self._ctx) @@ -1798,36 +1799,36 @@ class STIXPatternParser ( Parser ): self.enterRule(localctx, 30, self.RULE_setLiteral) self._la = 0 # Token type try: - self.state = 221 + self.state = 222 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,20,self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) - self.state = 208 - self.match(STIXPatternParser.LPAREN) self.state = 209 + self.match(STIXPatternParser.LPAREN) + self.state = 210 self.match(STIXPatternParser.RPAREN) pass elif la_ == 2: self.enterOuterAlt(localctx, 2) - self.state = 210 - self.match(STIXPatternParser.LPAREN) self.state = 211 + self.match(STIXPatternParser.LPAREN) + self.state = 
212 self.primitiveLiteral() - self.state = 216 + self.state = 217 self._errHandler.sync(self) _la = self._input.LA(1) while _la==STIXPatternParser.COMMA: - self.state = 212 - self.match(STIXPatternParser.COMMA) self.state = 213 + self.match(STIXPatternParser.COMMA) + self.state = 214 self.primitiveLiteral() - self.state = 218 + self.state = 219 self._errHandler.sync(self) _la = self._input.LA(1) - self.state = 219 + self.state = 220 self.match(STIXPatternParser.RPAREN) pass @@ -1872,17 +1873,17 @@ class STIXPatternParser ( Parser ): localctx = STIXPatternParser.PrimitiveLiteralContext(self, self._ctx, self.state) self.enterRule(localctx, 32, self.RULE_primitiveLiteral) try: - self.state = 225 + self.state = 226 self._errHandler.sync(self) token = self._input.LA(1) if token in [STIXPatternParser.IntNegLiteral, STIXPatternParser.IntPosLiteral, STIXPatternParser.FloatNegLiteral, STIXPatternParser.FloatPosLiteral, STIXPatternParser.HexLiteral, STIXPatternParser.BinaryLiteral, STIXPatternParser.StringLiteral, STIXPatternParser.TimestampLiteral]: self.enterOuterAlt(localctx, 1) - self.state = 223 + self.state = 224 self.orderableLiteral() pass elif token in [STIXPatternParser.BoolLiteral]: self.enterOuterAlt(localctx, 2) - self.state = 224 + self.state = 225 self.match(STIXPatternParser.BoolLiteral) pass else: @@ -1947,7 +1948,7 @@ class STIXPatternParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 227 + self.state = 228 _la = self._input.LA(1) if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << STIXPatternParser.IntNegLiteral) | (1 << STIXPatternParser.IntPosLiteral) | (1 << STIXPatternParser.FloatNegLiteral) | (1 << STIXPatternParser.FloatPosLiteral) | (1 << STIXPatternParser.HexLiteral) | (1 << STIXPatternParser.BinaryLiteral) | (1 << STIXPatternParser.StringLiteral) | (1 << STIXPatternParser.TimestampLiteral))) != 0)): self._errHandler.recoverInline(self) @@ -1983,41 +1984,46 @@ class STIXPatternParser ( Parser ): def 
observationExpressions_sempred(self, localctx, predIndex): if predIndex == 0: return self.precpred(self._ctx, 2) - + def observationExpressionOr_sempred(self, localctx, predIndex): if predIndex == 1: return self.precpred(self._ctx, 2) - + def observationExpressionAnd_sempred(self, localctx, predIndex): if predIndex == 2: return self.precpred(self._ctx, 2) - + def observationExpression_sempred(self, localctx, predIndex): if predIndex == 3: return self.precpred(self._ctx, 3) - + if predIndex == 4: return self.precpred(self._ctx, 2) - + if predIndex == 5: return self.precpred(self._ctx, 1) - + def comparisonExpression_sempred(self, localctx, predIndex): if predIndex == 6: return self.precpred(self._ctx, 2) - + def comparisonExpressionAnd_sempred(self, localctx, predIndex): if predIndex == 7: return self.precpred(self._ctx, 2) - + def objectPathComponent_sempred(self, localctx, predIndex): if predIndex == 8: return self.precpred(self._ctx, 3) + + + + +
Validation fails on network-traffic:start If I run the following STIX pattern `"[network-traffic:start = '2018-04-20T12:36:24.558Z']"` it fails validation and gives the following error `"FAIL: Error found at line 1:17. no viable alternative at input 'network-traffic:start'"` The pattern passes validation if I use the end attribute instead of start.
oasis-open/cti-pattern-validator
diff --git a/stix2patterns/test/test_inspector.py b/stix2patterns/test/test_inspector.py index 612e812..bd251d8 100644 --- a/stix2patterns/test/test_inspector.py +++ b/stix2patterns/test/test_inspector.py @@ -5,17 +5,17 @@ from stix2patterns.pattern import Pattern @pytest.mark.parametrize(u"pattern,expected_qualifiers", [ (u"[foo:bar = 1]", set()), - (u"[foo:bar = 1] repeats 5 times", set([u"REPEATS 5 TIMES"])), - (u"[foo:bar = 1] within 10.3 seconds", set([u"WITHIN 10.3 SECONDS"])), - (u"[foo:bar = 1] within 123 seconds", set([u"WITHIN 123 SECONDS"])), - (u"[foo:bar = 1] start '1932-11-12T15:42:15Z' stop '1964-10-53T21:12:26Z'", + (u"[foo:bar = 1] REPEATS 5 TIMES", set([u"REPEATS 5 TIMES"])), + (u"[foo:bar = 1] WITHIN 10.3 SECONDS", set([u"WITHIN 10.3 SECONDS"])), + (u"[foo:bar = 1] WITHIN 123 SECONDS", set([u"WITHIN 123 SECONDS"])), + (u"[foo:bar = 1] START '1932-11-12T15:42:15Z' STOP '1964-10-53T21:12:26Z'", set([u"START '1932-11-12T15:42:15Z' STOP '1964-10-53T21:12:26Z'"])), - (u"[foo:bar = 1] repeats 1 times repeats 2 times", + (u"[foo:bar = 1] REPEATS 1 TIMES REPEATS 2 TIMES", set([u"REPEATS 1 TIMES", u"REPEATS 2 TIMES"])), - (u"[foo:bar = 1] repeats 1 times and [foo:baz = 2] within 1.23 seconds", + (u"[foo:bar = 1] REPEATS 1 TIMES AND [foo:baz = 2] WITHIN 1.23 SECONDS", set([u"REPEATS 1 TIMES", u"WITHIN 1.23 SECONDS"])), - (u"([foo:bar = 1] start '1932-11-12T15:42:15Z' stop '1964-10-53T21:12:26Z' and [foo:abc < h'12ab']) within 22 seconds " - u"or [frob:baz not in (1,2,3)] repeats 31 times", + (u"([foo:bar = 1] START '1932-11-12T15:42:15Z' STOP '1964-10-53T21:12:26Z' AND [foo:abc < h'12ab']) WITHIN 22 SECONDS " + u"OR [frob:baz NOT IN (1,2,3)] REPEATS 31 TIMES", set([u"START '1932-11-12T15:42:15Z' STOP '1964-10-53T21:12:26Z'", u"WITHIN 22 SECONDS", u"REPEATS 31 TIMES"])) ]) @@ -28,11 +28,11 @@ def test_qualifiers(pattern, expected_qualifiers): @pytest.mark.parametrize(u"pattern,expected_obs_ops", [ (u"[foo:bar = 1]", set()), - (u"[foo:bar = 1] and [foo:baz > 
25.2]", set([u"AND"])), - (u"[foo:bar = 1] or [foo:baz != 'hello']", set([u"OR"])), - (u"[foo:bar = 1] followedby [foo:baz in (1,2,3)]", set([u"FOLLOWEDBY"])), - (u"[foo:bar = 1] and [foo:baz = 22] or [foo:abc = '123']", set([u"AND", u"OR"])), - (u"[foo:bar = 1] or ([foo:baz = false] followedby [frob:abc like '123']) within 46.1 seconds", + (u"[foo:bar = 1] AND [foo:baz > 25.2]", set([u"AND"])), + (u"[foo:bar = 1] OR [foo:baz != 'hello']", set([u"OR"])), + (u"[foo:bar = 1] FOLLOWEDBY [foo:baz IN (1,2,3)]", set([u"FOLLOWEDBY"])), + (u"[foo:bar = 1] AND [foo:baz = 22] OR [foo:abc = '123']", set([u"AND", u"OR"])), + (u"[foo:bar = 1] OR ([foo:baz = false] FOLLOWEDBY [frob:abc LIKE '123']) WITHIN 46.1 SECONDS", set([u"OR", u"FOLLOWEDBY"])) ]) def test_observation_ops(pattern, expected_obs_ops): @@ -44,22 +44,22 @@ def test_observation_ops(pattern, expected_obs_ops): @pytest.mark.parametrize(u"pattern,expected_comparisons", [ (u"[foo:bar = 1]", {u"foo": [([u"bar"], u"=", u"1")]}), - (u"[foo:bar=1 and foo:baz=2]", {u"foo": [([u"bar"], u"=", u"1"), ([u"baz"], u"=", u"2")]}), - (u"[foo:bar not !=1 or bar:foo<12.3]", { + (u"[foo:bar=1 AND foo:baz=2]", {u"foo": [([u"bar"], u"=", u"1"), ([u"baz"], u"=", u"2")]}), + (u"[foo:bar NOT !=1 OR bar:foo<12.3]", { u"foo": [([u"bar"], u"NOT !=", u"1")], u"bar": [([u"foo"], u"<", u"12.3")] }), - (u"[foo:bar=1] or [foo:baz matches '123\\\\d+']", { + (u"[foo:bar=1] OR [foo:baz MATCHES '123\\\\d+']", { u"foo": [([u"bar"], u"=", u"1"), ([u"baz"], u"MATCHES", u"'123\\\\d+'")] }), - (u"[foo:bar=1 and bar:foo not >33] repeats 12 times or " - u" ([baz:bar issubset '1234'] followedby [baz:quux not like 'a_cd'])", + (u"[foo:bar=1 AND bar:foo NOT >33] REPEATS 12 TIMES OR " + u" ([baz:bar ISSUBSET '1234'] FOLLOWEDBY [baz:quux NOT LIKE 'a_cd'])", { u"foo": [([u"bar"], u"=", u"1")], u"bar": [([u"foo"], u"NOT >", u"33")], u"baz": [([u"bar"], u"ISSUBSET", u"'1234'"), ([u"quux"], u"NOT LIKE", u"'a_cd'")] }), - (u"[obj-type:a.b[*][1].'c-d' not issuperset 
'1.2.3.4/16']", { + (u"[obj-type:a.b[*][1].'c-d' NOT ISSUPERSET '1.2.3.4/16']", { u"obj-type": [([u"a", u"b", INDEX_STAR, 1, u"c-d"], u"NOT ISSUPERSET", u"'1.2.3.4/16'")] }), ]) diff --git a/stix2patterns/test/test_validator.py b/stix2patterns/test/test_validator.py index f1fad90..5602fef 100644 --- a/stix2patterns/test/test_validator.py +++ b/stix2patterns/test/test_validator.py @@ -34,7 +34,7 @@ FAIL_CASES = [ ("[win-registry-key:key = 'hkey_local_machine\\\\foo\\\\bar'] WITHIN ]", # Missing Qualifier value "FAIL: Error found at line 1:63. mismatched input ']' expecting {IntPosLiteral, FloatPosLiteral}"), ("[win-registry-key:key = 'hkey_local_machine\\\\foo\\\\bar'] WITHIN 5 HOURS]", # SECONDS is the only valid time unit - "FAIL: Error found at line 1:65. mismatched input 'HOURS' expecting SECONDS"), + "FAIL: Error found at line 1:65. mismatched input 'HOURS' expecting 'SECONDS'"), ("[win-registry-key:key = 'hkey_local_machine\\\\foo\\\\bar'] WITHIN -5 SECONDS]", # Negative integer is invalid "FAIL: Error found at line 1:63. mismatched input '-5' expecting {IntPosLiteral, FloatPosLiteral}"), ("[network-traffic:dst_ref.value ISSUBSET ]", # Missing second Comparison operand @@ -45,6 +45,8 @@ FAIL_CASES = [ "FAIL: Error found at line 1:24. extraneous input 't'"), ("[artifact:payload_bin = b'====']", # Not valid Base64 "FAIL: Error found at line 1:24. extraneous input 'b'"), + ("[foo:bar=1] within 2 seconds", # keywords must be uppercase + "FAIL: Error found at line 1:12. mismatched input 'within' expecting <EOF>") # TODO: add more failing test cases. ] @@ -80,6 +82,7 @@ PASS_CASES = [ "[x_whatever:detected == t'2018-03-22T12:11:14.1Z']", "[artifact:payload_bin = b'dGhpcyBpcyBhIHRlc3Q=']", "[foo:bar=1] REPEATS 9 TIMES", + "[network-traffic:start = '2018-04-20T12:36:24.558Z']" ]
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 4 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "coverage" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 antlr4-python3-runtime==4.7 attrs==22.2.0 Babel==2.11.0 bump2version==1.0.1 bumpversion==0.6.0 certifi==2021.5.30 cfgv==3.3.1 charset-normalizer==2.0.12 coverage==6.2 distlib==0.3.9 docutils==0.18.1 filelock==3.4.1 identify==2.4.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.2.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 nodeenv==1.6.0 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytz==2025.2 PyYAML==6.0.1 requests==2.27.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-prompt==1.5.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 -e git+https://github.com/oasis-open/cti-pattern-validator.git@801c2364013d3cc5529f5e0b967def7f505a91e4#egg=stix2_patterns toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.16.2 zipp==3.6.0
name: cti-pattern-validator channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - antlr4-python3-runtime==4.7 - attrs==22.2.0 - babel==2.11.0 - bump2version==1.0.1 - bumpversion==0.6.0 - cfgv==3.3.1 - charset-normalizer==2.0.12 - coverage==6.2 - distlib==0.3.9 - docutils==0.18.1 - filelock==3.4.1 - identify==2.4.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.2.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - nodeenv==1.6.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.27.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-prompt==1.5.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.16.2 - zipp==3.6.0 prefix: /opt/conda/envs/cti-pattern-validator
[ "stix2patterns/test/test_validator.py::test_fail_patterns[[win-registry-key:key", "stix2patterns/test/test_validator.py::test_fail_patterns[[foo:bar=1]", "stix2patterns/test/test_validator.py::test_pass_patterns[[network-traffic:start" ]
[]
[ "stix2patterns/test/test_inspector.py::test_qualifiers[[foo:bar", "stix2patterns/test/test_inspector.py::test_qualifiers[([foo:bar", "stix2patterns/test/test_inspector.py::test_observation_ops[[foo:bar", "stix2patterns/test/test_inspector.py::test_comparisons[[foo:bar", "stix2patterns/test/test_inspector.py::test_comparisons[[foo:bar=1", "stix2patterns/test/test_inspector.py::test_comparisons[[foo:bar=1]", "stix2patterns/test/test_inspector.py::test_comparisons[[obj-type:a.b[*][1].'c-d'", "stix2patterns/test/test_validator.py::test_spec_patterns[[file:hashes.'SHA-256'", "stix2patterns/test/test_validator.py::test_spec_patterns[[email-message:from_ref.value", "stix2patterns/test/test_validator.py::test_spec_patterns[([file:hashes.MD5", "stix2patterns/test/test_validator.py::test_spec_patterns[[user-account:account_type", "stix2patterns/test/test_validator.py::test_spec_patterns[[artifact:mime_type", "stix2patterns/test/test_validator.py::test_spec_patterns[[file:name", "stix2patterns/test/test_validator.py::test_spec_patterns[[file:extensions.'windows-pebinary-ext'.sections[*].entropy", "stix2patterns/test/test_validator.py::test_spec_patterns[[file:mime_type", "stix2patterns/test/test_validator.py::test_spec_patterns[[network-traffic:dst_ref.type", "stix2patterns/test/test_validator.py::test_spec_patterns[[domain-name:value", "stix2patterns/test/test_validator.py::test_spec_patterns[[url:value", "stix2patterns/test/test_validator.py::test_spec_patterns[[x509-certificate:issuer", "stix2patterns/test/test_validator.py::test_spec_patterns[[windows-registry-key:key", "stix2patterns/test/test_validator.py::test_spec_patterns[[(file:name", "stix2patterns/test/test_validator.py::test_spec_patterns[[email-message:sender_ref.value", "stix2patterns/test/test_validator.py::test_spec_patterns[[x-usb-device:usbdrive.serial_number", "stix2patterns/test/test_validator.py::test_spec_patterns[[process:command_line", 
"stix2patterns/test/test_validator.py::test_spec_patterns[[network-traffic:dst_ref.value", "stix2patterns/test/test_validator.py::test_spec_patterns[([file:name", "stix2patterns/test/test_validator.py::test_fail_patterns[file:size", "stix2patterns/test/test_validator.py::test_fail_patterns[[file:size", "stix2patterns/test/test_validator.py::test_fail_patterns[[file:hashes.MD5", "stix2patterns/test/test_validator.py::test_fail_patterns[[file.size", "stix2patterns/test/test_validator.py::test_fail_patterns[[file:name", "stix2patterns/test/test_validator.py::test_fail_patterns[[network-traffic:dst_ref.value", "stix2patterns/test/test_validator.py::test_fail_patterns[[x_whatever:detected", "stix2patterns/test/test_validator.py::test_fail_patterns[[artifact:payload_bin", "stix2patterns/test/test_validator.py::test_pass_patterns[[file:size", "stix2patterns/test/test_validator.py::test_pass_patterns[[file:file_name", "stix2patterns/test/test_validator.py::test_pass_patterns[[file:extended_properties.'ntfs-ext'.sid", "stix2patterns/test/test_validator.py::test_pass_patterns[[emailaddr:value", "stix2patterns/test/test_validator.py::test_pass_patterns[[ipv4addr:value", "stix2patterns/test/test_validator.py::test_pass_patterns[[user-account:value", "stix2patterns/test/test_validator.py::test_pass_patterns[[file:file_system_properties.file_name", "stix2patterns/test/test_validator.py::test_pass_patterns[[network-connection:extended_properties[0].source_payload", "stix2patterns/test/test_validator.py::test_pass_patterns[[win-registry-key:key", "stix2patterns/test/test_validator.py::test_pass_patterns[[x_whatever:detected", "stix2patterns/test/test_validator.py::test_pass_patterns[[artifact:payload_bin", "stix2patterns/test/test_validator.py::test_pass_patterns[[foo:bar=1]" ]
[]
BSD 3-Clause "New" or "Revised" License
2,785
[ "stix2patterns/grammars/STIXPatternListener.py", "stix2patterns/grammars/STIXPatternParser.py", ".pre-commit-config.yaml", "stix2patterns/grammars/STIXPatternLexer.py" ]
[ "stix2patterns/grammars/STIXPatternListener.py", "stix2patterns/grammars/STIXPatternParser.py", ".pre-commit-config.yaml", "stix2patterns/grammars/STIXPatternLexer.py" ]
pennmem__cmlreaders-114
dc66e6458b1b84ee6da9f5e9a2693a888450327c
2018-07-16 18:24:39
177d8508b999957ec1492ecaa7775179ad875454
diff --git a/cmlreaders/readers/eeg.py b/cmlreaders/readers/eeg.py index 709e3a5..4dbc94d 100644 --- a/cmlreaders/readers/eeg.py +++ b/cmlreaders/readers/eeg.py @@ -365,6 +365,10 @@ class EEGReader(BaseCMLReader): epochs = [(0, None)] epochs = DefaultTuple(epochs) + if not len(epochs): + raise ValueError("No events/epochs given! Hint: did filtering " + "events result in at least one?") + ts = [] for fileno, epoch_lst in itertools.groupby(epochs, key=lambda x: x[-1]):
Poor error message when loading EEG and events are empty Example: trying to filter events with a resulting DataFrame with no rows results in an `IndexError` when concatenating `TimeSeries` objects because none get added in the `as_timeseries` method. We should explicitly check for this case and raise a more helpful error message.
pennmem/cmlreaders
diff --git a/cmlreaders/test/test_eeg.py b/cmlreaders/test/test_eeg.py index b4c347e..700669a 100644 --- a/cmlreaders/test/test_eeg.py +++ b/cmlreaders/test/test_eeg.py @@ -403,3 +403,12 @@ class TestRereference: reader = EEGReader("eeg") eeg = reader.load(scheme=scheme) assert_equal(eeg.data[0], self.reref_data) + + [email protected] +class TestLoadEEG: + def test_load_with_empty_events(self): + reader = EEGReader("eeg") + + with pytest.raises(ValueError): + reader.as_timeseries(epochs=[])
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 cached-property==1.5.2 certifi==2021.5.30 charset-normalizer==2.0.12 -e git+https://github.com/pennmem/cmlreaders.git@dc66e6458b1b84ee6da9f5e9a2693a888450327c#egg=cmlreaders codecov==2.1.13 coverage==6.2 decorator==5.1.1 defusedxml==0.7.1 docutils==0.18.1 entrypoints==0.4 execnet==1.9.0 flake8==3.9.2 h5py==3.1.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 ipython-genutils==0.2.0 Jinja2==3.0.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 MarkupSafe==2.0.1 mccabe==0.6.1 mistune==0.8.4 mne==0.23.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.8.8 nest-asyncio==1.6.0 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 pluggy==1.0.0 py==1.11.0 pycodestyle==2.7.0 pyflakes==2.3.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 webencodings==0.5.1 zipp==3.6.0
name: cmlreaders channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cached-property==1.5.2 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - decorator==5.1.1 - defusedxml==0.7.1 - docutils==0.18.1 - entrypoints==0.4 - execnet==1.9.0 - flake8==3.9.2 - h5py==3.1.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - ipython-genutils==0.2.0 - jinja2==3.0.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - markupsafe==2.0.1 - mccabe==0.6.1 - mistune==0.8.4 - mne==0.23.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.8.8 - nest-asyncio==1.6.0 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.7.0 - pyflakes==2.3.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - 
sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cmlreaders
[ "cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_with_empty_events" ]
[ "cmlreaders/test/test_eeg.py::TestFileReaders::test_split_eeg_reader", "cmlreaders/test/test_eeg.py::TestFileReaders::test_split_eeg_reader_missing_contacts", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader_rhino[R1363T-FR1-0-178-True]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader_rhino[R1392N-PAL1-0-112-False]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader[R1298E-87-CH88]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader[R1387E-13-CH14]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_read_whole_session[R1161E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader_with_events[R1161E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader_with_events[R1387E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1384J-False-43-LS12-LS1]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1111M-True-43-LPOG23-LPOG31]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1384J-ind.region-insula-10-200]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1288P-ind.region-lateralorbitofrontal-5-200]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1111M-ind.region-middletemporal-18-100]", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[RamulatorHDF5Reader-True]", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[RamulatorHDF5Reader-False]" ]
[ "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_include_contact[True]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_include_contact[False]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_npy_reader", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_rereference", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[SplitEEGReader-True]" ]
[]
null
2,786
[ "cmlreaders/readers/eeg.py" ]
[ "cmlreaders/readers/eeg.py" ]
fniessink__next-action-165
46f06f9138ef0ca532c7e7ecb281fbbc85880d46
2018-07-17 07:25:30
46f06f9138ef0ca532c7e7ecb281fbbc85880d46
diff --git a/CHANGELOG.md b/CHANGELOG.md index 16cf30d..b325977 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [Unreleased] - 2018-07-17 + +### Fixed + +- Give proper error message when the `--number` argument is smaller than one. Fixes #164. + ## [1.5.3] - 2018-07-14 ### Fixed diff --git a/README.md b/README.md index ffb50ba..b489018 100644 --- a/README.md +++ b/README.md @@ -448,9 +448,9 @@ To run the unit tests: ```console $ python -m unittest -............................................................................................................................................................................................................................................ +............................................................................................................................................................................................................................................. ---------------------------------------------------------------------- -Ran 236 tests in 2.931s +Ran 237 tests in 2.303s OK ``` @@ -461,9 +461,9 @@ To create the unit test coverage report run the unit tests under coverage with: ```console $ coverage run --branch -m unittest -............................................................................................................................................................................................................................................ +............................................................................................................................................................................................................................................. 
---------------------------------------------------------------------- -Ran 236 tests in 3.557s +Ran 237 tests in 3.159s OK ``` @@ -475,7 +475,7 @@ $ coverage report --fail-under=100 --omit=".venv/*" --skip-covered Name Stmts Miss Branch BrPart Cover ----------------------------------------- ----------------------------------------- -TOTAL 1347 0 173 0 100% +TOTAL 1360 0 177 0 100% 25 files skipped due to complete coverage. ``` diff --git a/next_action/arguments/parser.py b/next_action/arguments/parser.py index 4d9df44..5d8c8ad 100644 --- a/next_action/arguments/parser.py +++ b/next_action/arguments/parser.py @@ -85,7 +85,7 @@ class NextActionArgumentParser(argparse.ArgumentParser): "-a", "--all", default=1, action="store_const", dest="number", const=sys.maxsize, help="show all next actions") number.add_argument( - "-n", "--number", metavar="<number>", type=int, default=1, + "-n", "--number", metavar="<number>", type=number_type, default=1, help="number of next actions to show (default: %(default)s)") def add_filter_arguments(self) -> None: @@ -257,6 +257,17 @@ def date_type(value: str) -> datetime.date: raise argparse.ArgumentTypeError("invalid date: {0}".format(value)) +def number_type(value: str) -> int: + """Return the value if it's positive, else raise an error.""" + try: + number = int(value) + if number > 0: + return number + except ValueError: + pass + raise argparse.ArgumentTypeError("invalid number: {0}".format(value)) + + def subset(filters: List[str], prefix: str) -> Set[str]: """Return a subset of the filters based on prefix.""" return set(f.strip(prefix) for f in filters if f.startswith(prefix))
Give error message when --number argument is a negative integer
fniessink/next-action
diff --git a/tests/unittests/arguments/test_parser.py b/tests/unittests/arguments/test_parser.py index fed6430..e606485 100644 --- a/tests/unittests/arguments/test_parser.py +++ b/tests/unittests/arguments/test_parser.py @@ -212,7 +212,16 @@ class NumberTest(ParserTestCase): """Test that the argument parser exits if the option is faulty.""" self.assertRaises(SystemExit, parse_arguments) self.assertEqual([call(USAGE_MESSAGE), - call("next-action: error: argument -n/--number: invalid int value: 'not_a_number'\n")], + call("next-action: error: argument -n/--number: invalid number: not_a_number\n")], + mock_stderr_write.call_args_list) + + @patch.object(sys, "argv", ["next-action", "--number", "-1"]) + @patch.object(sys.stderr, "write") + def test_negative_number(self, mock_stderr_write): + """Test that the argument parser exits if the option is faulty.""" + self.assertRaises(SystemExit, parse_arguments) + self.assertEqual([call(USAGE_MESSAGE), + call("next-action: error: argument -n/--number: invalid number: -1\n")], mock_stderr_write.call_args_list) @patch.object(sys, "argv", ["next-action", "--all"])
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 3 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "codacy-coverage", "pylint", "pydocstyle", "pycodestyle", "bandit", "mypy", "hypothesis" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 attrs==25.3.0 bandit==1.8.3 Cerberus==1.2 certifi==2025.1.31 charset-normalizer==3.4.1 codacy-coverage==1.3.11 coverage==7.8.0 dateparser==0.7.0 dill==0.3.9 exceptiongroup==1.2.2 hypothesis==6.130.5 idna==3.10 iniconfig==2.1.0 isort==6.0.1 markdown-it-py==3.0.0 mccabe==0.7.0 mdurl==0.1.2 mypy==1.15.0 mypy-extensions==1.0.0 -e git+https://github.com/fniessink/next-action.git@46f06f9138ef0ca532c7e7ecb281fbbc85880d46#egg=next_action packaging==24.2 pbr==6.1.1 platformdirs==4.3.7 pluggy==1.5.0 pycodestyle==2.13.0 pydocstyle==6.3.0 Pygments==2.19.1 pylint==3.3.6 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.2 regex==2024.11.6 requests==2.32.3 rich==14.0.0 six==1.17.0 snowballstemmer==2.2.0 sortedcontainers==2.4.0 stevedore==5.4.1 tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0 tzlocal==5.3.1 urllib3==2.3.0
name: next-action channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - attrs==25.3.0 - bandit==1.8.3 - cerberus==1.2 - certifi==2025.1.31 - charset-normalizer==3.4.1 - codacy-coverage==1.3.11 - coverage==7.8.0 - dateparser==0.7.0 - dill==0.3.9 - exceptiongroup==1.2.2 - hypothesis==6.130.5 - idna==3.10 - iniconfig==2.1.0 - isort==6.0.1 - markdown-it-py==3.0.0 - mccabe==0.7.0 - mdurl==0.1.2 - mypy==1.15.0 - mypy-extensions==1.0.0 - packaging==24.2 - pbr==6.1.1 - platformdirs==4.3.7 - pluggy==1.5.0 - pycodestyle==2.13.0 - pydocstyle==6.3.0 - pygments==2.19.1 - pylint==3.3.6 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.2 - regex==2024.11.6 - requests==2.32.3 - rich==14.0.0 - six==1.17.0 - snowballstemmer==2.2.0 - sortedcontainers==2.4.0 - stevedore==5.4.1 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 - tzlocal==5.3.1 - urllib3==2.3.0 prefix: /opt/conda/envs/next-action
[ "tests/unittests/arguments/test_parser.py::NumberTest::test_faulty_number", "tests/unittests/arguments/test_parser.py::NumberTest::test_negative_number" ]
[ "tests/unittests/arguments/test_parser.py::DueDateTest::test_due_date", "tests/unittests/arguments/test_parser.py::DueDateTest::test_invalid_date", "tests/unittests/arguments/test_parser.py::DueDateTest::test_too_long" ]
[ "tests/unittests/arguments/test_parser.py::NoArgumentTest::test_filename", "tests/unittests/arguments/test_parser.py::NoArgumentTest::test_filters", "tests/unittests/arguments/test_parser.py::NoArgumentTest::test_style", "tests/unittests/arguments/test_parser.py::FilenameTest::test_add_default_filename", "tests/unittests/arguments/test_parser.py::FilenameTest::test_add_filename_twice", "tests/unittests/arguments/test_parser.py::FilenameTest::test_default_and_non_default", "tests/unittests/arguments/test_parser.py::FilenameTest::test_filename_argument", "tests/unittests/arguments/test_parser.py::FilenameTest::test_home_folder_argument", "tests/unittests/arguments/test_parser.py::FilenameTest::test_long_filename_argument", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_contexts_and_projects", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_empty_context", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_empty_excluded_project", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_empty_project", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_exclude_context", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_exclude_project", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_faulty_option", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_include_exclude_context", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_include_exclude_project", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_invalid_extra_argument", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_multiple_contexts", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_multiple_projects", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_one_context", "tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_one_project", 
"tests/unittests/arguments/test_parser.py::FilterArgumentTest::test_project_after_excluded", "tests/unittests/arguments/test_parser.py::NumberTest::test_all_actions", "tests/unittests/arguments/test_parser.py::NumberTest::test_all_and_number", "tests/unittests/arguments/test_parser.py::NumberTest::test_default_number", "tests/unittests/arguments/test_parser.py::NumberTest::test_number", "tests/unittests/arguments/test_parser.py::DueDateTest::test_default", "tests/unittests/arguments/test_parser.py::DueDateTest::test_faulty_date", "tests/unittests/arguments/test_parser.py::DueDateTest::test_no_due_date", "tests/unittests/arguments/test_parser.py::ReferenceTest::test_always", "tests/unittests/arguments/test_parser.py::ReferenceTest::test_default", "tests/unittests/arguments/test_parser.py::ReferenceTest::test_faulty_date", "tests/unittests/arguments/test_parser.py::ReferenceTest::test_multiple", "tests/unittests/arguments/test_parser.py::ReferenceTest::test_never" ]
[]
Apache License 2.0
2,788
[ "next_action/arguments/parser.py", "README.md", "CHANGELOG.md" ]
[ "next_action/arguments/parser.py", "README.md", "CHANGELOG.md" ]
level12__keg-elements-82
1d0f3d854820ca888ea4c0020af9e7f4d46a6c6a
2018-07-18 12:18:50
2b0ee8adedd6e21ffa426a5abf3adb9c79706abe
diff --git a/docker-entry b/docker-entry index b30f472..f933af4 100644 --- a/docker-entry +++ b/docker-entry @@ -22,7 +22,7 @@ export PIP_DISABLE_PIP_VERSION_CHECK=1 # b/c it gives me control over what version of tox I'm using without having to rebuild the # docker image. python3.5 -m pip install --upgrade --force-reinstall \ - --quiet --use-wheel --no-index --find-links=requirements/wheelhouse tox + --quiet --no-index --find-links=requirements/wheelhouse tox # Run the tests using our target python version. # diff --git a/keg_elements/forms/validators.py b/keg_elements/forms/validators.py index 35be03d..d060dc5 100644 --- a/keg_elements/forms/validators.py +++ b/keg_elements/forms/validators.py @@ -4,6 +4,34 @@ from decimal import Decimal import jinja2 from wtforms import ValidationError +import re + + +class ValidateAlphaNumeric(object): + """ + A validator to make sure than a form field contains only alphanumeric data + + usage example: + import keg_elements.forms.validators as validators + + wtforms.StringField('AlphaNumeric', validators=[ + validators.ValidateAlphaNumeric() + ]) + """ + regex = re.compile(r'^[a-zA-Z0-9]+$') + + def __init__(self, message=None): + self.message = message + + def __call__(self, form, field): + value = field.data + + message = self.message + if message is None: + message = field.gettext("Must only contain alphanumeric data.") + + if not self.regex.match(value): + raise ValidationError(message) def numeric(form, field):
Add an alphanumeric validator Currently there is no validator for alphanumeric (Characters and numbers only). It would be nice to have one for reuse.
level12/keg-elements
diff --git a/keg_elements/tests/test_forms/test_validators.py b/keg_elements/tests/test_forms/test_validators.py index e390e5d..93c4a9f 100644 --- a/keg_elements/tests/test_forms/test_validators.py +++ b/keg_elements/tests/test_forms/test_validators.py @@ -1,3 +1,4 @@ +# coding=utf-8 import jinja2 import pytest import wtforms @@ -65,3 +66,58 @@ class TestUniqueValidator(object): assert str(exc.value) == ( 'Form must provide either `obj` or `_obj` property for uniqueness validation.' ) + + +class TestAlphaNumericValidator(object): + + def test_alphanumeric(self): + message = 'Must only contain alphanumeric data.' + + class AlphaNumericMockForm(wtforms.Form): + alpha_numeric_field = wtforms.StringField('AlphaNumeric', validators=[ + validators.ValidateAlphaNumeric() + ]) + + form = AlphaNumericMockForm(alpha_numeric_field='123456asb') + form.validate() + assert form.errors == {} + + form = AlphaNumericMockForm(alpha_numeric_field='123456') + form.validate() + assert form.errors == {} + + form = AlphaNumericMockForm(alpha_numeric_field='abcd') + form.validate() + assert form.errors == {} + + form = AlphaNumericMockForm(alpha_numeric_field='123456!') + form.validate() + assert form.errors['alpha_numeric_field'] == [message] + + form = AlphaNumericMockForm(alpha_numeric_field='123456!') + form.validate() + assert form.errors['alpha_numeric_field'] == [message] + + form = AlphaNumericMockForm(alpha_numeric_field='!212') + form.validate() + assert form.errors['alpha_numeric_field'] == [message] + + form = AlphaNumericMockForm(alpha_numeric_field=' ') + form.validate() + assert form.errors['alpha_numeric_field'] == [message] + + form = AlphaNumericMockForm(alpha_numeric_field='123dsaf4 ') + form.validate() + assert form.errors['alpha_numeric_field'] == [message] + + form = AlphaNumericMockForm(alpha_numeric_field=' 123afd4') + form.validate() + assert form.errors['alpha_numeric_field'] == [message] + + form = AlphaNumericMockForm(alpha_numeric_field='1 23fdas4') + 
form.validate() + assert form.errors['alpha_numeric_field'] == [message] + + form = AlphaNumericMockForm(alpha_numeric_field=u'åfasdf') + form.validate() + assert form.errors['alpha_numeric_field'] == [message]
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 2 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/runtime.txt", "requirements/testing.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
appdirs==1.4.4 arrow==1.3.0 beautifulsoup4==4.13.3 BlazeUtils==0.7.0 blinker==1.9.0 cffi==1.17.1 click==8.1.8 coverage==7.8.0 cryptography==44.0.2 cssselect==1.3.0 exceptiongroup==1.2.2 flake8==7.2.0 Flask==3.1.0 Flask-SQLAlchemy==3.1.1 Flask-WebTest==0.1.6 Flask-WTF==1.2.2 greenlet==3.1.1 importlib_metadata==8.6.1 infinity==1.5 iniconfig==2.1.0 intervals==0.9.2 itsdangerous==2.2.0 Jinja2==3.1.6 Keg==0.11.1 -e git+https://github.com/level12/keg-elements.git@1d0f3d854820ca888ea4c0020af9e7f4d46a6c6a#egg=KegElements lxml==5.3.1 MarkupSafe==3.0.2 mccabe==0.7.0 packaging==24.2 pluggy==1.5.0 pycodestyle==2.13.0 pycparser==2.22 pyflakes==3.3.2 pyquery==2.0.1 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 raven==6.10.0 six==1.17.0 soupsieve==2.6 SQLAlchemy==2.0.40 SQLAlchemy-Utils==0.41.2 tomli==2.2.1 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 validators==0.34.0 waitress==3.0.2 WebOb==1.8.9 WebTest==3.0.4 Werkzeug==3.1.3 wrapt==1.17.2 WTForms==3.2.1 WTForms-Alchemy==0.19.0 WTForms-Components==0.11.0 zipp==3.21.0
name: keg-elements channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - appdirs==1.4.4 - arrow==1.3.0 - beautifulsoup4==4.13.3 - blazeutils==0.7.0 - blinker==1.9.0 - cffi==1.17.1 - click==8.1.8 - coverage==7.8.0 - cryptography==44.0.2 - cssselect==1.3.0 - exceptiongroup==1.2.2 - flake8==7.2.0 - flask==3.1.0 - flask-sqlalchemy==3.1.1 - flask-webtest==0.1.6 - flask-wtf==1.2.2 - greenlet==3.1.1 - importlib-metadata==8.6.1 - infinity==1.5 - iniconfig==2.1.0 - intervals==0.9.2 - itsdangerous==2.2.0 - jinja2==3.1.6 - keg==0.11.1 - lxml==5.3.1 - markupsafe==3.0.2 - mccabe==0.7.0 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pycparser==2.22 - pyflakes==3.3.2 - pyquery==2.0.1 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - raven==6.10.0 - six==1.17.0 - soupsieve==2.6 - sqlalchemy==2.0.40 - sqlalchemy-utils==0.41.2 - tomli==2.2.1 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - validators==0.34.0 - waitress==3.0.2 - webob==1.8.9 - webtest==3.0.4 - werkzeug==3.1.3 - wrapt==1.17.2 - wtforms==3.2.1 - wtforms-alchemy==0.19.0 - wtforms-components==0.11.0 - zipp==3.21.0 prefix: /opt/conda/envs/keg-elements
[ "keg_elements/tests/test_forms/test_validators.py::TestAlphaNumericValidator::test_alphanumeric" ]
[ "keg_elements/tests/test_forms/test_validators.py::TestUniqueValidator::test_validation_passes", "keg_elements/tests/test_forms/test_validators.py::TestUniqueValidator::test_validation_fails", "keg_elements/tests/test_forms/test_validators.py::TestUniqueValidator::test_no_object_link_provided" ]
[ "keg_elements/tests/test_forms/test_validators.py::TestUniqueValidator::test_get_obj" ]
[]
BSD License
2,789
[ "keg_elements/forms/validators.py", "docker-entry" ]
[ "keg_elements/forms/validators.py", "docker-entry" ]
pydicom__pydicom-680
20aa7acc2ec28d0c10974611d1f00a398e543de5
2018-07-18 20:05:46
0721bdc0b5797f40984cc55b5408e273328dc528
pep8speaks: Hello @mrbean-bremen! Thanks for submitting the PR. - In the file [`pydicom/dataelem.py`](https://github.com/pydicom/pydicom/blob/716b8687b557d5bb165a8178dadd01201a39e55d/pydicom/dataelem.py), following are the PEP8 issues : > [Line 203:17](https://github.com/pydicom/pydicom/blob/716b8687b557d5bb165a8178dadd01201a39e55d/pydicom/dataelem.py#L203): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent > [Line 242:80](https://github.com/pydicom/pydicom/blob/716b8687b557d5bb165a8178dadd01201a39e55d/pydicom/dataelem.py#L242): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters) - In the file [`pydicom/tests/test_filewriter.py`](https://github.com/pydicom/pydicom/blob/716b8687b557d5bb165a8178dadd01201a39e55d/pydicom/tests/test_filewriter.py), following are the PEP8 issues : > [Line 367:80](https://github.com/pydicom/pydicom/blob/716b8687b557d5bb165a8178dadd01201a39e55d/pydicom/tests/test_filewriter.py#L367): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters) > [Line 369:80](https://github.com/pydicom/pydicom/blob/716b8687b557d5bb165a8178dadd01201a39e55d/pydicom/tests/test_filewriter.py#L369): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
diff --git a/doc/release-notes.rst b/doc/release-notes.rst index 8d8889dc6..a9b32d0bb 100644 --- a/doc/release-notes.rst +++ b/doc/release-notes.rst @@ -4,6 +4,8 @@ Release history =============== +.. include:: whatsnew/v1.2.0.rst + .. include:: whatsnew/v1.1.0.rst .. include:: whatsnew/v1.0.0.rst diff --git a/doc/whatsnew/v1.2.0.rst b/doc/whatsnew/v1.2.0.rst index 40f6b0a47..9ea652663 100644 --- a/doc/whatsnew/v1.2.0.rst +++ b/doc/whatsnew/v1.2.0.rst @@ -4,9 +4,17 @@ Version 1.2.0 Changelog --------- + +Enhancements +............ + +* Added possibility to set byte strings as value for VRs that use only the + default character set (:issue:`624`) + * ``DeferredDataElement`` class deprecated and will be removed in v1.3 (:issue:`291`) + Fixes ..... diff --git a/pydicom/dataelem.py b/pydicom/dataelem.py index 1c4e8e4a9..c6b15e4b1 100644 --- a/pydicom/dataelem.py +++ b/pydicom/dataelem.py @@ -52,6 +52,11 @@ def isString(val): return isinstance(val, compat.string_types) +def _is_bytes(val): + """Return True only in Python 3 if `val` is of type `bytes`.""" + return False if in_py2 else isinstance(val, bytes) + + def isStringOrStringList(val): """Return True if `val` is a str or an iterable containing only strings.""" @@ -233,6 +238,14 @@ class DataElement(object): def _convert(self, val): """Convert `val` to an appropriate type for the element's VR.""" + + # If the value is a byte string and has a VR that can only be encoded + # using the default character repertoire, we convert it to a string + # here to allow for byte string input in these cases + if _is_bytes(val) and self.VR in ( + 'AE', 'AS', 'CS', 'DA', 'DS', 'DT', 'IS', 'TM', 'UI', 'UR'): + val = val.decode() + if self.VR == 'IS': return pydicom.valuerep.IS(val) elif self.VR == 'DA' and config.datetime_conversion:
"bytes" representation for AE fields not supported #### Description Fields with VR of AE in datasets, should support getting a `bytes` object. Perhaps, this should be even the default representation thereof, as they truly support only ASCII and not real strings. Instead, when attempting to serialize a dataset with a `bytes` value, an error is raised. #### Steps/Code to Reproduce ``` ds = Dataset() ds.StationAETitle = b"abc" ds.is_implicit_VR = True ds.is_little_endian = True write_file("/tmp/a.dcm", ds) ``` #### Versions python 3.6.4 pydicom 1.0.1
pydicom/pydicom
diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py index 4b943d651..6c0f11f82 100644 --- a/pydicom/tests/test_filewriter.py +++ b/pydicom/tests/test_filewriter.py @@ -316,7 +316,7 @@ class WriteDataElementTests(unittest.TestCase): self.check_data_element(data_elem, expected) def test_write_multi_DA(self): - data_elem = DataElement(0x0014407E, 'DA', ['20100101', '20101231']) + data_elem = DataElement(0x0014407E, 'DA', ['20100101', b'20101231']) expected = (b'\x14\x00\x7E\x40' # tag b'\x12\x00\x00\x00' # length b'20100101\\20101231 ') # padded value @@ -331,11 +331,13 @@ class WriteDataElementTests(unittest.TestCase): b'\x06\x00\x00\x00' # length b'010203') # padded value self.check_data_element(data_elem, expected) + data_elem = DataElement(0x00080030, 'TM', b'010203') + self.check_data_element(data_elem, expected) data_elem = DataElement(0x00080030, 'TM', time(1, 2, 3)) self.check_data_element(data_elem, expected) def test_write_multi_TM(self): - data_elem = DataElement(0x0014407C, 'TM', ['082500', '092655']) + data_elem = DataElement(0x0014407C, 'TM', ['082500', b'092655']) expected = (b'\x14\x00\x7C\x40' # tag b'\x0E\x00\x00\x00' # length b'082500\\092655 ') # padded value @@ -350,21 +352,55 @@ class WriteDataElementTests(unittest.TestCase): b'\x0E\x00\x00\x00' # length b'20170101120000') # value self.check_data_element(data_elem, expected) + data_elem = DataElement(0x0008002A, 'DT', b'20170101120000') + self.check_data_element(data_elem, expected) data_elem = DataElement(0x0008002A, 'DT', datetime(2017, 1, 1, 12)) self.check_data_element(data_elem, expected) def test_write_multi_DT(self): data_elem = DataElement(0x0040A13A, 'DT', - ['20120820120804', '20130901111111']) + ['20120820120804', b'20130901111111']) expected = (b'\x40\x00\x3A\xA1' # tag b'\x1E\x00\x00\x00' # length b'20120820120804\\20130901111111 ') # padded value self.check_data_element(data_elem, expected) + data_elem = DataElement( + 0x0040A13A, 'DT', 
u'20120820120804\\20130901111111') + self.check_data_element(data_elem, expected) + data_elem = DataElement( + 0x0040A13A, 'DT', b'20120820120804\\20130901111111') + self.check_data_element(data_elem, expected) + data_elem = DataElement(0x0040A13A, 'DT', [datetime(2012, 8, 20, 12, 8, 4), datetime(2013, 9, 1, 11, 11, 11)]) self.check_data_element(data_elem, expected) + def test_write_ascii_vr_with_padding(self): + expected = (b'\x08\x00\x54\x00' # tag + b'\x0C\x00\x00\x00' # length + b'CONQUESTSRV ') # padded value + data_elem = DataElement(0x00080054, 'AE', 'CONQUESTSRV') + self.check_data_element(data_elem, expected) + data_elem = DataElement(0x00080054, 'AE', b'CONQUESTSRV') + self.check_data_element(data_elem, expected) + + expected = (b'\x08\x00\x62\x00' # tag + b'\x06\x00\x00\x00' # length + b'1.2.3\x00') # padded value + data_elem = DataElement(0x00080062, 'UI', '1.2.3') + self.check_data_element(data_elem, expected) + data_elem = DataElement(0x00080062, 'UI', b'1.2.3') + self.check_data_element(data_elem, expected) + + expected = (b'\x08\x00\x60\x00' # tag + b'\x04\x00\x00\x00' # length + b'REG ') + data_elem = DataElement(0x00080060, 'CS', 'REG') + self.check_data_element(data_elem, expected) + data_elem = DataElement(0x00080060, 'CS', b'REG') + self.check_data_element(data_elem, expected) + def test_write_OD_implicit_little(self): """Test writing elements with VR of OD works correctly.""" # VolumetricCurvePoints
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 3 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work -e git+https://github.com/pydicom/pydicom.git@20aa7acc2ec28d0c10974611d1f00a398e543de5#egg=pydicom pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: pydicom channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/pydicom
[ "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DT", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_TM", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_ascii_vr_with_padding", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DA", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DT", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_TM" ]
[ "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raw_elements_preserved_implicit_vr", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raw_elements_preserved_explicit_vr", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_changed_character_set" ]
[ "pydicom/tests/test_filewriter.py::WriteFileTests::testCT", "pydicom/tests/test_filewriter.py::WriteFileTests::testJPEG2000", "pydicom/tests/test_filewriter.py::WriteFileTests::testListItemWriteBack", "pydicom/tests/test_filewriter.py::WriteFileTests::testMR", "pydicom/tests/test_filewriter.py::WriteFileTests::testMultiPN", "pydicom/tests/test_filewriter.py::WriteFileTests::testRTDose", "pydicom/tests/test_filewriter.py::WriteFileTests::testRTPlan", "pydicom/tests/test_filewriter.py::WriteFileTests::testUnicode", "pydicom/tests/test_filewriter.py::WriteFileTests::test_write_double_filemeta", "pydicom/tests/test_filewriter.py::WriteFileTests::test_write_ffff_ffff", "pydicom/tests/test_filewriter.py::WriteFileTests::test_write_no_ts", "pydicom/tests/test_filewriter.py::WriteFileTests::test_write_removes_grouplength", "pydicom/tests/test_filewriter.py::WriteFileTests::testwrite_short_uid", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testCT", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testJPEG2000", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testListItemWriteBack", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testMR", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testMultiPN", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testRTDose", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testRTPlan", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testUnicode", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_multivalue_DA", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_double_filemeta", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_ffff_ffff", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_no_ts", "pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_removes_grouplength", 
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testwrite_short_uid", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_empty_AT", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DA", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_explicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_implicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_explicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_implicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_explicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_implicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UN_implicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_explicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_implicit_little", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_empty_LO", "pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_unknown_vr_raises", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_lut_descriptor", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_overlay", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_data", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_one", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_three", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_sequence", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_waveform_bits_allocated", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_not_ambiguous", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_not_ambiguous_raw_data_element", 
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_correct_ambiguous_data_element", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_correct_ambiguous_raw_data_element", "pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_pixel_data_not_ow_or_ob", "pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_big_endian", "pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_little_endian", "pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_raises", "pydicom/tests/test_filewriter.py::ScratchWriteTests::testImpl_LE_deflen_write", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_default", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_custom", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_no_preamble", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_none_preamble", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_bad_preamble", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix_none", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_ds_changed", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_implicit_to_explicit_vr", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_dataset", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_dataset_with_explicit_vr", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_implicit_to_explicit_vr_using_destination", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_explicit_to_implicit_vr", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_big_to_little_endian", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_little_to_big_endian", 
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_added", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_private_tag_vr_from_implicit_data", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_rgb_from_implicit_to_explicit_vr", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_not_added", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_raises", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_media_storage_sop_class_uid_added", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_no_file_meta", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raise_no_file_meta", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_add_file_meta", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_standard", "pydicom/tests/test_filewriter.py::TestWriteToStandard::test_commandset_no_written", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_bad_elements", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_missing_elements", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length_updated", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_version", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_version_name_length", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_class_uid_length", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_filelike_position", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_dataset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta", 
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta_dataset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_dataset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_ds_unchanged", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_file_meta_unchanged", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_filemeta_dataset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_no_preamble", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_dataset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta_dataset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_custom", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_dataset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_default", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_filemeta_dataset", "pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_read_write_identical", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_bad_elements", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_filelike_position", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_group_length_updated", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_meta_unchanged", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_missing_elements", "pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_transfer_syntax_not_added", "pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_empty_value", "pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_list", 
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_singleton", "pydicom/tests/test_filewriter.py::TestWriteNumbers::test_exception", "pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_big_endian", "pydicom/tests/test_filewriter.py::TestWritePN::test_no_encoding_unicode", "pydicom/tests/test_filewriter.py::TestWritePN::test_no_encoding", "pydicom/tests/test_filewriter.py::TestWriteDT::test_format_dt", "pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_correct_data", "pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_incorrect_data", "pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_correct_data", "pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_incorrect_data" ]
[]
MIT License
2,790
[ "pydicom/dataelem.py", "doc/whatsnew/v1.2.0.rst", "doc/release-notes.rst" ]
[ "pydicom/dataelem.py", "doc/whatsnew/v1.2.0.rst", "doc/release-notes.rst" ]
google__mobly-472
9c5b19e1b932888d3e347fa529b82f5417b28a5e
2018-07-19 00:05:12
95286a01a566e056d44acfa9577a45bc7f37f51d
diff --git a/mobly/config_parser.py b/mobly/config_parser.py index aa43c03..b873af0 100644 --- a/mobly/config_parser.py +++ b/mobly/config_parser.py @@ -16,6 +16,7 @@ from builtins import str import copy import io +import pprint import os import yaml @@ -189,4 +190,7 @@ class TestRunConfig(object): return copy.deepcopy(self) def __str__(self): - return str(self.__dict__) + content = dict(self.__dict__) + content.pop('summary_writer') + content.pop('register_controller') + return pprint.pformat(content) diff --git a/mobly/controllers/android_device.py b/mobly/controllers/android_device.py index 504f671..4d89af8 100644 --- a/mobly/controllers/android_device.py +++ b/mobly/controllers/android_device.py @@ -436,8 +436,9 @@ class AndroidDevice(object): self._log_path = os.path.join(self._log_path_base, 'AndroidDevice%s' % self._serial) self._debug_tag = self._serial - self.log = AndroidDeviceLoggerAdapter(logging.getLogger(), - {'tag': self.debug_tag}) + self.log = AndroidDeviceLoggerAdapter(logging.getLogger(), { + 'tag': self.debug_tag + }) self.sl4a = None self.ed = None self._adb_logcat_process = None @@ -783,15 +784,7 @@ class AndroidDevice(object): @property def is_rootable(self): - """If the build type is 'user', the device is not rootable. - - Other possible build types are 'userdebug' and 'eng', both are rootable. - We are checking the last four chars of the clean stdout because the - stdout of the adb command could be polluted with other info like adb - server startup message. 
- """ - build_type_output = self.adb.getprop('ro.build.type').lower() - return build_type_output[-4:] != 'user' + return self.adb.getprop('ro.debuggable') == '1' @property def model(self): diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py index 279360d..498434d 100644 --- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py +++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py @@ -19,7 +19,7 @@ The JSON protocol expected by this module is: Request: { - "id": <monotonically increasing integer containing the ID of + "id": <monotonically increasing integer containing the ID of this request> "method": <string containing the name of the method to execute> "params": <JSON array containing the arguments to the method> @@ -239,6 +239,7 @@ class JsonRpcClientBase(object): try: self._client.write(msg.encode("utf8") + b'\n') self._client.flush() + self.log.debug('Snippet sent %s.', msg) except socket.error as e: raise Error( self._ad, @@ -255,7 +256,9 @@ class JsonRpcClientBase(object): Error: a socket error occurred during the read. """ try: - return self._client.readline() + response = self._client.readline() + self.log.debug('Snippet received: %s', response) + return response except socket.error as e: raise Error( self._ad, @@ -299,7 +302,7 @@ class JsonRpcClientBase(object): if not response: raise ProtocolError(self._ad, ProtocolError.NO_RESPONSE_FROM_SERVER) - result = json.loads(str(response, encoding="utf8")) + result = json.loads(str(response, encoding='utf8')) if result['error']: raise ApiError(self._ad, result['error']) if result['id'] != apiid:
Log the contents of config file at the debug level early This helps in debugging remote user's malformed json/yaml or configs that don't adhere to schema.
google/mobly
diff --git a/mobly/test_runner.py b/mobly/test_runner.py index af834e8..e4fa9d8 100644 --- a/mobly/test_runner.py +++ b/mobly/test_runner.py @@ -381,8 +381,9 @@ class TestRunner(object): test_class: class, test class to execute. tests: Optional list of test names within the class to execute. """ - with test_class(config) as test_instance: + logging.debug('Executing test class "%s" with config: %s', + test_class.__name__, config) try: cls_result = test_instance.run(tests) self.results += cls_result diff --git a/tests/mobly/config_parser_test.py b/tests/mobly/config_parser_test.py index ea16668..44dfbdc 100644 --- a/tests/mobly/config_parser_test.py +++ b/tests/mobly/config_parser_test.py @@ -56,6 +56,11 @@ class OutputTest(unittest.TestCase): config = config_parser._load_config_file(tmp_file_path) self.assertEqual(config['TestBeds'][0]['Name'], u'\u901a') + def test_run_config_type(self): + config = config_parser.TestRunConfig() + self.assertNotIn('summary_writer', str(config)) + self.assertNotIn('register_controller', str(config)) + if __name__ == "__main__": unittest.main() diff --git a/tests/mobly/controllers/android_device_test.py b/tests/mobly/controllers/android_device_test.py index e12e511..eb5129a 100755 --- a/tests/mobly/controllers/android_device_test.py +++ b/tests/mobly/controllers/android_device_test.py @@ -706,7 +706,8 @@ class AndroidDeviceTest(unittest.TestCase): self, MockFastboot, MockAdbProxy): mock_serial = '1' mock_adb_proxy = MockAdbProxy.return_value - mock_adb_proxy.getprop.return_value = 'userdebug' + # Set getprop to return '1' to indicate the device is rootable. + mock_adb_proxy.getprop.return_value = '1' mock_adb_proxy.has_shell_command.side_effect = lambda command: { 'logpersist.start': True, 'logpersist.stop': True, }[command]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 3 }
1.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y adb" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work future==1.0.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/google/mobly.git@9c5b19e1b932888d3e347fa529b82f5417b28a5e#egg=mobly mock==1.0.1 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work portpicker==1.6.0 psutil==7.0.0 pyserial==3.5 pytest @ file:///croot/pytest_1738938843180/work pytz==2025.2 PyYAML==6.0.2 timeout-decorator==0.5.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: mobly channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - future==1.0.0 - mock==1.0.1 - portpicker==1.6.0 - psutil==7.0.0 - pyserial==3.5 - pytz==2025.2 - pyyaml==6.0.2 - timeout-decorator==0.5.0 prefix: /opt/conda/envs/mobly
[ "tests/mobly/config_parser_test.py::OutputTest::test_run_config_type" ]
[ "tests/mobly/config_parser_test.py::OutputTest::test__load_config_file", "tests/mobly/config_parser_test.py::OutputTest::test__load_config_file_with_unicode" ]
[ "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_logpersist", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_all_logpersist", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_logpersist_start", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice__enable_logpersist_with_missing_logpersist_stop", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_build_info", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_cat_adb_log", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_cat_adb_log_with_unicode", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_no_log_exists", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_existing_file", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_service", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_debug_tag", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_device_info", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_instantiation", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_attribute_name", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_package", 
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_snippet_name", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_fail_cleanup_also_fail", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_failure", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_precheck_failure", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_start_app_fails", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_serial_is_valid", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_snippet_cleanup", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fail", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fallback", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_with_destination", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat_with_user_param", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_unload_snippet", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial_with_service_running", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_dict_list", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_empty_config", 
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_no_valid_config", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_not_list_config", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_pickup_all", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_string_list", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_usb_id", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_no_match", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial_and_extra_field", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_too_many_matches", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_no_match", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_success_with_extra_field", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads", "tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads_skip_logcat" ]
[]
Apache License 2.0
2,791
[ "mobly/controllers/android_device.py", "mobly/controllers/android_device_lib/jsonrpc_client_base.py", "mobly/config_parser.py" ]
[ "mobly/controllers/android_device.py", "mobly/controllers/android_device_lib/jsonrpc_client_base.py", "mobly/config_parser.py" ]
pydicom__pydicom-683
de87f4694ddac7bdb8e20effd82b22eee0d104c0
2018-07-19 03:10:53
0721bdc0b5797f40984cc55b5408e273328dc528
darcymason: Looks good. Reminder that this requires #139 to be in place for v1.2 (already labeled as such).
diff --git a/doc/whatsnew/v1.2.0.rst b/doc/whatsnew/v1.2.0.rst index 0def5e071..40f6b0a47 100644 --- a/doc/whatsnew/v1.2.0.rst +++ b/doc/whatsnew/v1.2.0.rst @@ -4,6 +4,9 @@ Version 1.2.0 Changelog --------- +* ``DeferredDataElement`` class deprecated and will be removed in v1.3 + (:issue:`291`) + Fixes ..... diff --git a/pydicom/dataelem.py b/pydicom/dataelem.py index 2b0c6dd01..1c4e8e4a9 100644 --- a/pydicom/dataelem.py +++ b/pydicom/dataelem.py @@ -13,6 +13,7 @@ A DataElement has a tag, # from __future__ import absolute_import from collections import namedtuple +import warnings from pydicom import config # don't import datetime_conversion directly from pydicom import compat @@ -400,6 +401,12 @@ class DeferredDataElement(DataElement): data_element_tell -- file position at start of data element, (not the start of the value part, but start of whole element) """ + warnings.warn( + "DeferredDataElement is deprecated and will be removed in " + "pydicom v1.3", + DeprecationWarning + ) + if not isinstance(tag, BaseTag): tag = Tag(tag) self.tag = tag
DeferredDataElement deprecation I've been looking through the code in `dataelem.py` and noticed that `DeferredDataElement` seems to be broken. The `value` getter calls `self.read_value()` which doesn't exist, as far as I can tell. The class doesn't seem to be used anywhere, doesn't have test coverage and hasn't changed since you moved to github, so I was wondering if it's still supported or if it should be removed?
pydicom/pydicom
diff --git a/pydicom/tests/test_dataelem.py b/pydicom/tests/test_dataelem.py index ce233a548..f39ca068d 100644 --- a/pydicom/tests/test_dataelem.py +++ b/pydicom/tests/test_dataelem.py @@ -13,9 +13,15 @@ import unittest import pytest from pydicom.charset import default_encoding -from pydicom.dataelem import (DataElement, RawDataElement, - DataElement_from_raw, isStringOrStringList) +from pydicom.dataelem import ( + DataElement, + RawDataElement, + DataElement_from_raw, + isStringOrStringList, + DeferredDataElement +) from pydicom.dataset import Dataset +from pydicom.filebase import DicomBytesIO from pydicom.tag import Tag from pydicom.uid import UID from pydicom.valuerep import DSfloat @@ -375,5 +381,10 @@ class RawDataElementTests(unittest.TestCase): DataElement_from_raw(raw, default_encoding) -if __name__ == "__main__": - unittest.main() +def test_deferred_data_element_deprecated(): + """Test the deprecation warning is working""" + fp = DicomBytesIO() + fp.is_little_endian = True + fp.is_implicit_VR = True + with pytest.deprecated_call(): + elem = DeferredDataElement(0x00000000, 'UL', fp, 0, 0, 4)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 2 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "numpy>=1.16.0", "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 importlib-metadata==4.8.3 iniconfig==1.1.1 numpy==1.19.5 packaging==21.3 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/pydicom/pydicom.git@de87f4694ddac7bdb8e20effd82b22eee0d104c0#egg=pydicom pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: pydicom channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - numpy==1.19.5 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/pydicom
[ "pydicom/tests/test_dataelem.py::test_deferred_data_element_deprecated" ]
[]
[ "pydicom/tests/test_dataelem.py::test_is_string_like", "pydicom/tests/test_dataelem.py::DataElementTests::testBackslash", "pydicom/tests/test_dataelem.py::DataElementTests::testDSFloatConversion", "pydicom/tests/test_dataelem.py::DataElementTests::testEqualityInheritance", "pydicom/tests/test_dataelem.py::DataElementTests::testEqualityNotElement", "pydicom/tests/test_dataelem.py::DataElementTests::testEqualityPrivateElement", "pydicom/tests/test_dataelem.py::DataElementTests::testEqualitySequenceElement", "pydicom/tests/test_dataelem.py::DataElementTests::testEqualityStandardElement", "pydicom/tests/test_dataelem.py::DataElementTests::testHash", "pydicom/tests/test_dataelem.py::DataElementTests::testKeyword", "pydicom/tests/test_dataelem.py::DataElementTests::testRetired", "pydicom/tests/test_dataelem.py::DataElementTests::testUID", "pydicom/tests/test_dataelem.py::DataElementTests::testVM1", "pydicom/tests/test_dataelem.py::DataElementTests::testVM2", "pydicom/tests/test_dataelem.py::DataElementTests::test_description_group_length", "pydicom/tests/test_dataelem.py::DataElementTests::test_description_unknown", "pydicom/tests/test_dataelem.py::DataElementTests::test_description_unknown_private", "pydicom/tests/test_dataelem.py::DataElementTests::test_equality_class_members", "pydicom/tests/test_dataelem.py::DataElementTests::test_getitem_raises", "pydicom/tests/test_dataelem.py::DataElementTests::test_inequality_sequence", "pydicom/tests/test_dataelem.py::DataElementTests::test_inequality_standard", "pydicom/tests/test_dataelem.py::DataElementTests::test_repeater_str", "pydicom/tests/test_dataelem.py::DataElementTests::test_repr_seq", "pydicom/tests/test_dataelem.py::DataElementTests::test_str_no_vr", "pydicom/tests/test_dataelem.py::RawDataElementTests::testKeyError", "pydicom/tests/test_dataelem.py::RawDataElementTests::testTagWithoutEncodingPython3", "pydicom/tests/test_dataelem.py::RawDataElementTests::testValidTag", 
"pydicom/tests/test_dataelem.py::RawDataElementTests::test_unknown_vr" ]
[]
MIT License
2,792
[ "pydicom/dataelem.py", "doc/whatsnew/v1.2.0.rst" ]
[ "pydicom/dataelem.py", "doc/whatsnew/v1.2.0.rst" ]
oauthlib__oauthlib-564
371029906aa6ccc9943120e096a7bbdd0ef6945d
2018-07-19 16:47:27
e9c6f01bc6f89e6b90f2c9b61e6a9878d5612147
diff --git a/oauthlib/common.py b/oauthlib/common.py index f25656f..c1180e6 100644 --- a/oauthlib/common.py +++ b/oauthlib/common.py @@ -114,7 +114,7 @@ def decode_params_utf8(params): return decoded -urlencoded = set(always_safe) | set('=&;:%+~,*@!()/?') +urlencoded = set(always_safe) | set('=&;:%+~,*@!()/?\'$') def urldecode(query):
ValueError when query string contains "'" Similar to Issue #404, `oauthlib.common.urldecode` raises a ValueError if the provided query string contains a "'" character. ``` ValueError: Error trying to decode a non urlencoded string. Found invalid characters: set([u"'"]) in the string: 'url=Schr%C3%B6dinger's%20cat'. Please ensure the request/response body is x-www-form-urlencoded. ``` Per [RFC 3986 section 3.4]( https://tools.ietf.org/html/rfc3986#section-3.4), query strings should be allowed to include the characters defined as `pchar` which in turn allows for characters defined as `sub-delims`. This includes the "'" character.
oauthlib/oauthlib
diff --git a/tests/test_common.py b/tests/test_common.py index b0ea20d..fb4bd5b 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -39,6 +39,8 @@ class EncodingTest(TestCase): self.assertItemsEqual(urldecode('foo=bar@spam'), [('foo', 'bar@spam')]) self.assertItemsEqual(urldecode('foo=bar/baz'), [('foo', 'bar/baz')]) self.assertItemsEqual(urldecode('foo=bar?baz'), [('foo', 'bar?baz')]) + self.assertItemsEqual(urldecode('foo=bar\'s'), [('foo', 'bar\'s')]) + self.assertItemsEqual(urldecode('foo=$'), [('foo', '$')]) self.assertRaises(ValueError, urldecode, 'foo bar') self.assertRaises(ValueError, urldecode, '%R') self.assertRaises(ValueError, urldecode, '%RA')
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "coverage", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 blinker==1.4 certifi==2021.5.30 cffi==1.15.1 coverage==6.2 cryptography==40.0.2 importlib-metadata==4.8.3 iniconfig==1.1.1 nose==1.3.7 -e git+https://github.com/oauthlib/oauthlib.git@371029906aa6ccc9943120e096a7bbdd0ef6945d#egg=oauthlib packaging==21.3 pluggy==1.0.0 py==1.11.0 pycparser==2.21 PyJWT==1.6.0 pyparsing==3.1.4 pytest==7.0.1 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: oauthlib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - blinker==1.4 - cffi==1.15.1 - coverage==6.2 - cryptography==40.0.2 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pycparser==2.21 - pyjwt==1.6.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/oauthlib
[ "tests/test_common.py::EncodingTest::test_urldecode" ]
[]
[ "tests/test_common.py::ParameterTest::test_add_params_to_uri", "tests/test_common.py::ParameterTest::test_extract_invalid", "tests/test_common.py::ParameterTest::test_extract_non_formencoded_string", "tests/test_common.py::ParameterTest::test_extract_params_blank_string", "tests/test_common.py::ParameterTest::test_extract_params_dict", "tests/test_common.py::ParameterTest::test_extract_params_empty_list", "tests/test_common.py::ParameterTest::test_extract_params_formencoded", "tests/test_common.py::ParameterTest::test_extract_params_twotuple", "tests/test_common.py::GeneratorTest::test_generate_client_id", "tests/test_common.py::GeneratorTest::test_generate_nonce", "tests/test_common.py::GeneratorTest::test_generate_timestamp", "tests/test_common.py::GeneratorTest::test_generate_token", "tests/test_common.py::RequestTest::test_dict_body", "tests/test_common.py::RequestTest::test_empty_dict_body", "tests/test_common.py::RequestTest::test_empty_list_body", "tests/test_common.py::RequestTest::test_empty_string_body", "tests/test_common.py::RequestTest::test_getattr_existing_attribute", "tests/test_common.py::RequestTest::test_getattr_raise_attribute_error", "tests/test_common.py::RequestTest::test_getattr_return_default", "tests/test_common.py::RequestTest::test_list_body", "tests/test_common.py::RequestTest::test_non_formencoded_string_body", "tests/test_common.py::RequestTest::test_non_unicode_params", "tests/test_common.py::RequestTest::test_none_body", "tests/test_common.py::RequestTest::test_param_free_sequence_body", "tests/test_common.py::RequestTest::test_password_body", "tests/test_common.py::RequestTest::test_sanitizing_authorization_header", "tests/test_common.py::RequestTest::test_token_body", "tests/test_common.py::CaseInsensitiveDictTest::test_basic", "tests/test_common.py::CaseInsensitiveDictTest::test_update" ]
[]
BSD 3-Clause "New" or "Revised" License
2,793
[ "oauthlib/common.py" ]
[ "oauthlib/common.py" ]
pennmem__cmlreaders-129
828f57fa7cae9033917802ef646c1ad334790ed3
2018-07-19 17:03:02
177d8508b999957ec1492ecaa7775179ad875454
diff --git a/cmlreaders/cmlreader.py b/cmlreaders/cmlreader.py index 6ae4bfd..7d35bc4 100644 --- a/cmlreaders/cmlreader.py +++ b/cmlreaders/cmlreader.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import List, Optional import numpy as np import pandas as pd @@ -233,3 +233,47 @@ class CMLReader(object): }) return self.load('eeg', **kwargs) + + @classmethod + def load_events(cls, subjects: Optional[List[str]] = None, + experiments: Optional[List[str]] = None, + rootdir: Optional[str] = None) -> pd.DataFrame: + """Load events from multiple sessions. + + Parameters + ---------- + subjects + List of subjects. + experiments + List of experiments to include. + rootdir + Path to root data directory. + + """ + if subjects is None and experiments is None: + raise ValueError( + "Please specify at least one subject or experiment." + ) + + rootdir = get_root_dir(rootdir) + df = get_data_index("all", rootdir=rootdir) + + if subjects is None: + subjects = df["subject"].unique() + + if experiments is None: + experiments = df["experiment"].unique() + + events = [] + + for subject in subjects: + for experiment in experiments: + mask = (df["subject"] == subject) & (df["experiment"] == experiment) + sessions = df[mask]["session"].unique() + + for session in sessions: + reader = CMLReader(subject, experiment, session, + rootdir=rootdir) + events.append(reader.load("events")) + + return pd.concat(events)
Multi-session event reading A common use case when working with events is to load multiple sessions at a time. Right now, this is not as easy as it should be. ```python all_events = [] for session in sessions_completed: sess_events = cml.CMLReader(subject="R1409D", experiment="FR6", session=session, localization=0, montage=0, rootdir=rhino_root).load('task_events') all_events.append(sess_events) all_sessions_df = pd.concat(all_events) ``` The proposed API is to have a special method associated with cml_reader to allow loading multiple sessions of events. The other option is to allow additional kwargs in .load(), but then it becomes extremely difficult to document that function since it would take different parameters depending on the data type being loaded. Instead, we want to mimic the behavior of load_eeg and have it be a separate method associated with the class. Single sessions of events can be loaded using either reader.load() after having specified a session when creating the reader, or by using reader.load_events(sessions=[1]). 
At a minimum, the following cases should be handled: - Given a single experiment, load all completed sessions - Given a single experiment, load a specific subset of sessions ```python reader = CMLReader(subject="R1409D", experiment="FR6") # Load all sessions all_fr6_events = reader.load_events() # Load specific sessions subset_fr5_events = reader.load_events(sessions=[0, 1]) ``` - Given multiple experiments, load all completed sessions from each experiment - Given a reader with no experiment specified, raise an error if load_events is called ```python reader = CMLReader(subject="R1409D") # Invalid Request all_events = reader.load_events() # Load sessions across experiments all_record_only_events = reader.load_events(experiments=['catFR1', 'FR1']) ``` Depending on if it is important enough of a use case, it could also handle the following cases: - Given multiple experiments and a specific session, load that session number of each experiment, i.e. the first session of FR1 and catFR1 for a particular subject - Given multiple experiments and a specific set of sessions, load those specific sessions for each experiment given, raising an error if any of the requested session/experiment combinations are not available ```python reader = CMLReader(subject="R1409D") # Multi-experiment, single session multi_exp_single_sess = reader.load_events(experiments=['catFR1', 'FR1'], sessions=[0]) # Multi-experiment, multi-session multi_exp_multi_sess = reader.load_events(experiments=['catFR1', 'FR1'], sessions=[0, 1]) ```
pennmem/cmlreaders
diff --git a/cmlreaders/test/test_cmlreader.py b/cmlreaders/test/test_cmlreader.py index f222842..614fb8e 100644 --- a/cmlreaders/test/test_cmlreader.py +++ b/cmlreaders/test/test_cmlreader.py @@ -200,3 +200,25 @@ class TestLoadMontage: with pytest.raises(exc.MissingDataError): reader.load(kind, read_categories=True) + + [email protected] [email protected] +class TestLoadAggregate: + @pytest.mark.parametrize("subjects,experiments,unique_sessions", [ + (None, None, None), + (["R1111M", "R1260D"], ["FR1"], 5), + (["R1111M"], None, 22), + (["R1111M"], ["PS2"], 6), + (None, ["FR2"], 79), + ]) + def test_load_events(self, subjects, experiments, unique_sessions, + rhino_root): + if subjects is experiments is None: + with pytest.raises(ValueError): + CMLReader.load_events(subjects, experiments, rootdir=rhino_root) + return + + events = CMLReader.load_events(subjects, experiments, rootdir=rhino_root) + size = len(events.groupby(["subject", "experiment", "session"]).size()) + assert size == unique_sessions
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 cached-property==1.5.2 certifi==2021.5.30 charset-normalizer==2.0.12 -e git+https://github.com/pennmem/cmlreaders.git@828f57fa7cae9033917802ef646c1ad334790ed3#egg=cmlreaders codecov==2.1.13 coverage==6.2 cycler==0.11.0 decorator==5.1.1 defusedxml==0.7.1 docutils==0.18.1 entrypoints==0.4 flake8==3.9.2 h5py==3.1.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 ipython-genutils==0.2.0 Jinja2==3.0.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mccabe==0.6.1 mistune==0.8.4 mne==0.23.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.8.8 nest-asyncio==1.6.0 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 Pillow==8.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.7.0 pyflakes==2.3.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 webencodings==0.5.1 zipp==3.6.0
name: cmlreaders channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cached-property==1.5.2 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - cycler==0.11.0 - decorator==5.1.1 - defusedxml==0.7.1 - docutils==0.18.1 - entrypoints==0.4 - flake8==3.9.2 - h5py==3.1.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - ipython-genutils==0.2.0 - jinja2==3.0.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mccabe==0.6.1 - mistune==0.8.4 - mne==0.23.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.8.8 - nest-asyncio==1.6.0 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - pillow==8.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.7.0 - pyflakes==2.3.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - 
sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cmlreaders
[ "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[None-None-None]" ]
[ "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[voxel_coordinates-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[classifier_excluded_leads-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[jacksheet-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[good_leads-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[leads-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[electrode_coordinates-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[prior_stim_results-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[target_selection_table-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[electrode_categories-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[classifier_summary-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[math_summary-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[session_summary-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[pairs-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[contacts-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[localization-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[baseline_classifier-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[used_classifier-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[events-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[events-LTP093-ltpFR2-0-None]", 
"cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[all_events-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[all_events-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[task_events-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[task_events-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[math_events-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[math_events-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[baseline_classifier.zip]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[used_classifier.zip]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[baseline_classifier]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[used_classifier]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_ps4_events[R1354E-PS4_FR-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_ps2_events", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_rhino[True-contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_rhino[True-pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_rhino[False-contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_rhino[False-pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_missing[contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_missing[pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[subjects1-experiments1-5]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[subjects2-None-22]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[subjects3-experiments3-6]", 
"cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[None-experiments4-79]" ]
[ "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-catFR1-0-0-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-catFR1-None-0-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-PAL1-None-2-2]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-PAL3-2-2-2]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-TH1-0-0-0]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-TH1-None-0-0]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[LTP093-ltpFR2-0-None-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[voxel_coordinates-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[classifier_excluded_leads-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[jacksheet-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[good_leads-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[leads-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[electrode_coordinates-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[prior_stim_results-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[target_selection_table-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[electrode_categories-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[classifier_summary-LTP093-ltpFR2-0-None]", 
"cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[math_summary-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[session_summary-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[pairs-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[contacts-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[localization-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[baseline_classifier-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[used_classifier-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[voxel_coordinates.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[classifier_excluded_leads.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[jacksheet.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[good_leads.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[leads.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[electrode_coordinates.csv]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[prior_stim_results.csv]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[target_selection_table.csv]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[pairs.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[contacts.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[localization.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[all_events.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[math_events.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[task_events.json]", 
"cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[voxel_coordinates]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[classifier_excluded_leads]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[jacksheet]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[good_leads]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[leads]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[electrode_coordinates]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[prior_stim_results]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[target_selection_table]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[pairs]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[contacts]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[localization]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[all_events]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[math_events]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[task_events]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_unimplemented", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories[True-contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories[True-pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories[False-contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories[False-pairs]" ]
[]
null
2,794
[ "cmlreaders/cmlreader.py" ]
[ "cmlreaders/cmlreader.py" ]
beetbox__beets-2988
66443169ad17f1e062620403f45eae7200e633a9
2018-07-20 00:53:52
0f9ffeec3eb95f2612e78bee6380af984f639b78
FichteFoll: 1. Not sure what you mean by this. Formatting works exactly like it did before. However, I missed that `self.model_keys` is fetched from `model.keys(True)`, which includes the album fallback. I'll override `self.model_keys` in `FormattedItemMapping`'s `__init__`. 2. I see. So, I could keep an Item-internal reference to an album and just run `load` on that before trying to access album attributes (or `keys()`). Is `load` smart enough not to update itself when there haven't been any changes to the database since the last fetch? If it doesn't, then this change would be more involved and requires some care to actually make the caching useful. (I was thinking about storing the same album reference in multiple items for example, but I'd need to call `load` for each item individually anyway because we can't be sure the database hasn't been updated in the meantime.) Otherwise, just keeping a lazy-loaded album attribute per item individually would probably already be an improvement. 3. I'm not sure I understand the problem you're describing because, as far as I'm aware, all of this is still possible. Attribute access on the item itself is prioritized over the album fallback, for their standard fields and even for flexattrs. `_setitem` was not modified, so you can also still set an item's field or flexattr to override an album's. sampsyo: > Not sure what you mean by this. Formatting works exactly like it did before. However, I missed that self.model_keys is fetched from model.keys(True), which includes the album fallback (and results in the item's formatter for a field being requested rather than the album's). I'll override self.model_keys in FormattedItemMapping's __init__. Thanks for catching that! What I'm worried about is not a direct conflict or anything—just that we're implementing the same logic ("fallback" between item and album attributes) twice. 
If evaluating the expression `item.field` already looks up `field` in `item`'s album, then ideally we would not need FormattedItemMapping—the plain old FormattedMapping from dbcore would do the trick. But as you discovered, there's subtlety about which formatter gets used. Maybe there's an elegant way to provide a merged view without the duplication, but maybe this division of responsibilities is OK. > I see. So, I could keep an Item-internal reference to an album and just run load on that before trying to access album attributes (or keys()). Is load smart enough not to update itself when there haven't been any changes to the database since the last fetch? If it doesn't, then this change would be more involved and requires some care to actually make the caching useful. (I was thinking about storing the same album reference in multiple items for example, but I'd need to call load for each item individually anyway because we can't be sure the database hasn't been updated in the meantime.) Otherwise, just keeping a lazy-loaded album attribute per item individually would probably already be an improvement. No, `load` always loads the latest data. (Otherwise, we'd need some mechanism on the side for tracking when the database has changed—which likely would be no faster to check than just loading from the database.) > I'm not sure I understand the problem you're describing because, as far as I'm aware, all of this is still possible. Attribute access on the item itself is prioritized over the album fallback, for their standard fields and even for flexattrs. _setitem was not modified, so you can also still set an item's field or flexattr to override an album's. OK, good point! I had missed that existing values on items take precedence. That means, unless I'm mistaken, that item-level fixed attributes *always* take precedence—because it's impossible to remove them. Sounds good! 
FichteFoll: ``` ~ λ hyperfine "beet list" -m 2 Benchmark #1: beet list Time (mean ± σ): 7.207 s ± 0.018 s [User: 5.438 s, System: 0.759 s] Range (min … max): 7.194 s … 7.220 s ~/code/beets ∃ hyperfine "python -m beets list" -m 2 Benchmark #1: python -m beets list Time (mean ± σ): 17.757 s ± 0.093 s [User: 12.581 s, System: 2.135 s] Range (min … max): 17.691 s … 17.823 s ``` Well, not looking so bright. It's a >100% slowdown. This'd need some smart caching, probably. I do wonder why the difference is so high, though. I mean, the ItemFormatter needed to access the item's album before as well. Maybe `keys` is run more often than I expected? Also, I should probably add some documentation about this change. sampsyo: Hmm, that is a little worrisome. Let's dig a little deeper and see if we can't mitigate some of the effects. (Thanks for the tip about hyperfine, btw!) FichteFoll: (I just found out about hyperfine today as I browsed the fd Readme by the same author.) This is probably the point where I would start to look into profiling as I'm still not too familiar with the code base and believe this would provide a good starting point. Have you ever done this in python and have some recommendations for tools or other tips? (I haven't.) sampsyo: I think that `cProfile`, in the standard library, is still probably the best profiler out there. One tip I do have, however, is that [SnakeViz](https://jiffyclub.github.io/snakeviz/) is a really nice browser-based GUI for viewing/navigating profile data. FichteFoll: Took a look back at this. I used [py-spy](https://github.com/benfred/py-spy) for some quick effort-less profiling and it was quite obvious that the majority of the time is being spent with database access in `get_album` (or rather the `album` property, as I changed it). Just uncommenting the `album.load()` code removes the entire performance impact, but it also means the albums we're trying to print could be outdated.
I considered the simplest solution forward to be what I suggested earlier: 1. Make the item cache its album field and provide access through a property. The album returned by this property is *read only* since it is, well, cached. I decided against preventive measures here and instead made the property "hidden" with an underscore and provided documentation. 2. Only load database model objects when they have changed by tracking a revision number that I added to the database and increase on each mutating transaction. Had to tweak this for a little while until it passed all tests, but I suppose this is fairly safe going forward now. I added a comment clarifying on the possibility of race conditions, but as long as the `_db_lock` is acquired, we are fine. Let me know what you think. Also, I wasn't sure if I should add a section regarding the API to the changelog. It would mention the fallback of item access on Item and that re-loading is now lazy, although the latter should be transparent.
sampsyo: Post added: https://discourse.beets.io/t/call-for-testers-better-queries-for-album-level-fields-a-performance-improvement/477 sampsyo: (Thanks. I restarted that Travis job and everything’s fine.) FichteFoll: Any updates on this? Doesn't seem like the discourse thread attracted much attention. FichteFoll: Rebased to fix the merge conflict on the changelog. AppVeyor has some errors in the setup phase with chocolatey. FichteFoll: Still nobody using this, it seems. :disappointed: Let me know when you intend to merge this, so I only need to fix the changelog conflict once (or you do it :man_shrugging: ). FichteFoll: Someone was asking for this a few days ago on IRC, but I missed them and couldn't point towards this PR. Anyway, I've been using this branch for half a year now with exactly 0 issues so far. I don't use the entire feature set of beets, but importing and path styles based on album flexattrs, which is my primary use case, are just fine. I'll try to remember making a new speed comparison since my library grew a bit over time, but I don't expect it to be much different compared to the last time. kergoth: FYI, I ran into a number of issues with this, mostly relating to types in the fallback, both in path format queries and in `beet ls`. See https://discourse.beets.io/t/ranges-not-working-in-beet-ls-with-album-fields-in-item-track-context/ arcresu: I wasn't aware of this when I threw together the diff on the discourse thread @kergoth mentioned. 
I'll just reproduce it here: ```diff diff --git a/beets/library.py b/beets/library.py index 16db1e97..71b6db22 100644 --- a/beets/library.py +++ b/beets/library.py @@ -526,7 +526,17 @@ class Item(LibModel): @classmethod def _getters(cls): - getters = plugins.item_field_getters() + def atoi(f, ag): + def ig(i): + a = i.get_album() + if a: + return ag(a) + else: + return cls._type(f).null + return ig + getters = {f: atoi(f, g) + for f, g in plugins.album_field_getters().items()} + getters.update(plugins.item_field_getters()) getters['singleton'] = lambda i: i.album_id is None getters['filesize'] = Item.try_filesize # In bytes. return getters diff --git a/beets/ui/__init__.py b/beets/ui/__init__.py index 327db6b0..c3adc72d 100644 --- a/beets/ui/__init__.py +++ b/beets/ui/__init__.py @@ -1145,7 +1145,10 @@ def _setup(options, lib=None): plugins.send("library_opened", lib=lib) # Add types and queries defined by plugins. - library.Item._types.update(plugins.types(library.Item)) + at = plugins.types(library.Album) + at.update(library.Item._types) + at.update(plugins.types(library.Item)) + library.Item._types = at library.Album._types.update(plugins.types(library.Album)) library.Item._queries.update(plugins.named_queries(library.Item)) library.Album._queries.update(plugins.named_queries(library.Album)) ``` This wasn't intended to be a final implementation, but my approach was a little bit different in that I thought the album-item relationship was something beets-specific and therefore should be reflected in `library.py` rather than dbcore. I used the existing getter mechanism. The `atoi` function takes an album-level getter and converts it into an item-level one that fetches the item's album and delegates to the original getter. Item-level properties still have precedence, as in this PR. 
I did find that it was necessary to also change `Item._types` in order to get queries to work as intended since otherwise the album-level fields don't have type information when accessed on `Item`s. Note that we recently picked up a helper for memoisation in another PR: https://github.com/beetbox/beets/blob/909fd1eb272691d5cccaa0426db74871c770a8bb/beets/util/__init__.py#L1037-L1057 FichteFoll: Thanks for the headsup. I suspect that the problem with ranges is related to me not updating the items' type information, as you did in your diff. I was entirely new to the code base before working on this, so I just never considered that to be relevant. The `lazy_property` is similar so something I drafted earlier in the process but ended up scraping because of what I outlined in an earlier comment (https://github.com/beetbox/beets/pull/2988#issuecomment-406897912). The problem here is that the cached album is a snapshot of the database at whatever time it was first accessed, but the db may change during runtime and the lazy property will have no way to consider that fact. I'll take a closer look at your getter approach when I find some time to work on this again. (I'd like to mention that I cannot use beets without this feature anymore, so even if there is a huge update going on, I'll continue using my fork until I updated the PR for the changes.)
diff --git a/beets/dbcore/db.py b/beets/dbcore/db.py index 46b47a2e1..409ecc9af 100755 --- a/beets/dbcore/db.py +++ b/beets/dbcore/db.py @@ -56,10 +56,11 @@ class FormattedMapping(Mapping): are replaced. """ - def __init__(self, model, for_path=False): + def __init__(self, model, for_path=False, compute_keys=True): self.for_path = for_path self.model = model - self.model_keys = model.keys(True) + if compute_keys: + self.model_keys = model.keys(True) def __getitem__(self, key): if key in self.model_keys: @@ -257,6 +258,11 @@ class Model(object): value is the same as the old value (e.g., `o.f = o.f`). """ + _revision = -1 + """A revision number from when the model was loaded from or written + to the database. + """ + @classmethod def _getters(cls): """Return a mapping from field names to getter functions. @@ -309,9 +315,11 @@ class Model(object): def clear_dirty(self): """Mark all fields as *clean* (i.e., not needing to be stored to - the database). + the database). Also update the revision. """ self._dirty = set() + if self._db: + self._revision = self._db.revision def _check_db(self, need_id=True): """Ensure that this object is associated with a database row: it @@ -351,9 +359,9 @@ class Model(object): """ return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT - def __getitem__(self, key): - """Get the value for a field. Raise a KeyError if the field is - not available. + def _get(self, key, default=None, raise_=False): + """Get the value for a field, or `default`. Alternatively, + raise a KeyError if the field is not available. """ getters = self._getters() if key in getters: # Computed. @@ -365,8 +373,18 @@ class Model(object): return self._type(key).null elif key in self._values_flex: # Flexible. return self._values_flex[key] - else: + elif raise_: raise KeyError(key) + else: + return default + + get = _get + + def __getitem__(self, key): + """Get the value for a field. Raise a KeyError if the field is + not available. 
+ """ + return self._get(key, raise_=True) def _setitem(self, key, value): """Assign the value for a field, return whether new and old value @@ -441,19 +459,10 @@ class Model(object): for key in self: yield key, self[key] - def get(self, key, default=None): - """Get the value for a given key or `default` if it does not - exist. - """ - if key in self: - return self[key] - else: - return default - def __contains__(self, key): """Determine whether `key` is an attribute on this object. """ - return key in self.keys(True) + return key in self.keys(computed=True) def __iter__(self): """Iterate over the available field names (excluding computed @@ -538,8 +547,14 @@ class Model(object): def load(self): """Refresh the object's metadata from the library database. + + If check_revision is true, the database is only queried loaded when a + transaction has been committed since the item was last loaded. """ self._check_db() + if not self._dirty and self._db.revision == self._revision: + # Exit early + return stored_obj = self._db._get(type(self), self.id) assert stored_obj is not None, u"object {0} not in DB".format(self.id) self._values_fixed = LazyConvertDict(self) @@ -794,6 +809,12 @@ class Transaction(object): """A context manager for safe, concurrent access to the database. All SQL commands should be executed through a transaction. """ + + _mutated = False + """A flag storing whether a mutation has been executed in the + current transaction. + """ + def __init__(self, db): self.db = db @@ -815,12 +836,15 @@ class Transaction(object): entered but not yet exited transaction. If it is the last active transaction, the database updates are committed. """ + # Beware of races; currently secured by db._db_lock + self.db.revision += self._mutated with self.db._tx_stack() as stack: assert stack.pop() is self empty = not stack if empty: # Ending a "root" transaction. End the SQLite transaction. 
self.db._connection().commit() + self._mutated = False self.db._db_lock.release() def query(self, statement, subvals=()): @@ -836,7 +860,6 @@ class Transaction(object): """ try: cursor = self.db._connection().execute(statement, subvals) - return cursor.lastrowid except sqlite3.OperationalError as e: # In two specific cases, SQLite reports an error while accessing # the underlying database file. We surface these exceptions as @@ -846,9 +869,14 @@ class Transaction(object): raise DBAccessError(e.args[0]) else: raise + else: + self._mutated = True + return cursor.lastrowid def script(self, statements): """Execute a string containing multiple SQL statements.""" + # We don't know whether this mutates, but quite likely it does. + self._mutated = True self.db._connection().executescript(statements) @@ -864,6 +892,11 @@ class Database(object): supports_extensions = hasattr(sqlite3.Connection, 'enable_load_extension') """Whether or not the current version of SQLite supports extensions""" + revision = 0 + """The current revision of the database. To be increased whenever + data is written in a transaction. 
+ """ + def __init__(self, path, timeout=5.0): self.path = path self.timeout = timeout diff --git a/beets/importer.py b/beets/importer.py index 3220b260f..c5701ff30 100644 --- a/beets/importer.py +++ b/beets/importer.py @@ -786,7 +786,7 @@ class ImportTask(BaseImportTask): if (not dup_item.album_id or dup_item.album_id in replaced_album_ids): continue - replaced_album = dup_item.get_album() + replaced_album = dup_item._cached_album if replaced_album: replaced_album_ids.add(dup_item.album_id) self.replaced_albums[replaced_album.path] = replaced_album diff --git a/beets/library.py b/beets/library.py index 78552bb61..3ba79d069 100644 --- a/beets/library.py +++ b/beets/library.py @@ -375,7 +375,11 @@ class FormattedItemMapping(dbcore.db.FormattedMapping): """ def __init__(self, item, for_path=False): - super(FormattedItemMapping, self).__init__(item, for_path) + # We treat album and item keys specially here, + # so exclude transitive album keys from the model's keys. + super(FormattedItemMapping, self).__init__(item, for_path, + compute_keys=False) + self.model_keys = item.keys(computed=True, with_album=False) self.item = item @lazy_property @@ -386,15 +390,15 @@ class FormattedItemMapping(dbcore.db.FormattedMapping): def album_keys(self): album_keys = [] if self.album: - for key in self.album.keys(True): + for key in self.album.keys(computed=True): if key in Album.item_keys \ or key not in self.item._fields.keys(): album_keys.append(key) return album_keys - @lazy_property + @property def album(self): - return self.item.get_album() + return self.item._cached_album def _get(self, key): """Get the value for a key, either from the album or the item. @@ -545,6 +549,29 @@ class Item(LibModel): _format_config_key = 'format_item' + __album = None + """Cached album object. Read-only.""" + + @property + def _cached_album(self): + """The Album object that this item belongs to, if any, or + None if the item is a singleton or is not associated with a + library. 
+ The instance is cached and refreshed on access. + + DO NOT MODIFY! + If you want a copy to modify, use :meth:`get_album`. + """ + if not self.__album and self._db: + self.__album = self._db.get_album(self) + elif self.__album: + self.__album.load() + return self.__album + + @_cached_album.setter + def _cached_album(self, album): + self.__album = album + @classmethod def _getters(cls): getters = plugins.item_field_getters() @@ -571,12 +598,45 @@ class Item(LibModel): value = bytestring_path(value) elif isinstance(value, BLOB_TYPE): value = bytes(value) + elif key == 'album_id': + self._cached_album = None changed = super(Item, self)._setitem(key, value) if changed and key in MediaFile.fields(): self.mtime = 0 # Reset mtime on dirty. + def __getitem__(self, key): + """Get the value for a field, falling back to the album if + necessary. Raise a KeyError if the field is not available. + """ + try: + return super(Item, self).__getitem__(key) + except KeyError: + if self._cached_album: + return self._cached_album[key] + raise + + def keys(self, computed=False, with_album=True): + """Get a list of available field names. `with_album` + controls whether the album's fields are included. + """ + keys = super(Item, self).keys(computed=computed) + if with_album and self._cached_album: + keys += self._cached_album.keys(computed=computed) + return keys + + def get(self, key, default=None, with_album=True): + """Get the value for a given key or `default` if it does not + exist. Set `with_album` to false to skip album fallback. + """ + try: + return self._get(key, default, raise_=with_album) + except KeyError: + if self._cached_album: + return self._cached_album.get(key, default) + return default + def update(self, values): """Set all key/value pairs in the mapping. If mtime is specified, it is not reset (as it might otherwise be). 
diff --git a/beets/ui/__init__.py b/beets/ui/__init__.py index 28879a731..362e8752a 100644 --- a/beets/ui/__init__.py +++ b/beets/ui/__init__.py @@ -1155,8 +1155,13 @@ def _setup(options, lib=None): plugins.send("library_opened", lib=lib) # Add types and queries defined by plugins. - library.Item._types.update(plugins.types(library.Item)) - library.Album._types.update(plugins.types(library.Album)) + plugin_types_album = plugins.types(library.Album) + library.Album._types.update(plugin_types_album) + item_types = plugin_types_album.copy() + item_types.update(library.Item._types) + item_types.update(plugins.types(library.Item)) + library.Item._types = item_types + library.Item._queries.update(plugins.named_queries(library.Item)) library.Album._queries.update(plugins.named_queries(library.Album)) diff --git a/beetsplug/convert.py b/beetsplug/convert.py index 275703e97..45a571d38 100644 --- a/beetsplug/convert.py +++ b/beetsplug/convert.py @@ -358,7 +358,7 @@ class ConvertPlugin(BeetsPlugin): item.store() # Store new path and audio data. if self.config['embed'] and not linked: - album = item.get_album() + album = item._cached_album if album and album.artpath: self._log.debug(u'embedding album art from {}', util.displayable_path(album.artpath)) diff --git a/docs/changelog.rst b/docs/changelog.rst index b9020621b..df286a10a 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -178,6 +178,11 @@ New features: * :doc:`/plugins/replaygain` now does its analysis in parallel when using the ``command`` or ``ffmpeg`` backends. :bug:`3478` +* Fields in queries now fall back to an item's album and check its fields too. + Notably, this allows querying items by an album flex attribute, also in path + configuration. + Thanks to :user:`FichteFoll`. + :bug:`2797` :bug:`2988` * Removes usage of the bs1770gain replaygain backend. Thanks to :user:`SamuelCook`. 
* Added ``trackdisambig`` which stores the recording disambiguation from @@ -344,6 +349,12 @@ For plugin developers: :bug:`3355` * The autotag hooks have been modified such that they now take 'bpm', 'musical_key' and a per-track based 'genre' as attributes. +* Item (and attribute) access on an item now falls back to the album's + attributes as well. If you specifically want to access an item's attributes, + use ``Item.get(key, with_album=False)``. :bug:`2988` +* ``Item.keys`` also has a ``with_album`` argument now, defaulting to ``True``. +* A ``revision`` attribute has been added to ``Database``. It is increased on + every transaction that mutates it. :bug:`2988` For packagers:
Path config query does not work with album-level flexible attributes ### Problem Path queries do not work with attributes that are set at album level. More specifically, I import an album setting at the same time a flexible attribute. I have various paths that are supposed to be modified by this attribute. Though the attribute is set at import time, the query at the path will not match, neither at import time, nor at a later time. My configuration: ``` item_fields: initial: (albumartist_sort or artist_sort or albumartist or artist or '_')[0].upper() disc_count: u'%02i.' % (disc) disc_and_track: u'%02i.%02i' % (disc, track) if disctotal > 1 else u'%02i' % (track) definite_year: u'%04i' % original_year or original_date[:3] or year or date[:3] or 0 album_fields: cond_release_category: | if albumstatus.lower()=='official': if albumtype.lower() in ['album', 'compilation', 'ep']: return None elif supergenre=='Soundtrack' and albumtype.lower() in ['soundtrack']: return None else: return albumtype.capitalize() else: return albumstatus.capitalize() boxset_folder: | if boxset==1: boxdisc_folder=u'%i. 
%s' % disc, disctitle if boxdisc_year: boxdisc_folder=u'%s %s' % boxdisc_folder, boxdisc_year return boxdisc_folder else: return None paths: default: $supergenre/%asciify{$initial}/$albumartist/%if{$cond_release_category,$cond_release_category/}[$definite_year] $album%aunique{albumartist album,albumdisambig label catalognum year}/$disc_and_track - $title boxset:1: $supergenre/%asciify{$initial}/$albumartist/%if{$cond_release_category,$cond_release_category/}[$definite_year] $album%aunique{albumartist album,albumdisambig label catalognum year}/$disc_count $disctitle%ifdef{boxdisc_year, ($boxdisc_year)}/$track - $title singleton: Non-Album/$artist - $title comp: $supergenre/Compilations/$album%aunique{}/$disc_and_track - $title supergenre:Soundtrack: Soundtrack/$album/%if{$disctotal>1,$disc.}$track $title ``` And this is the result: ```user@beethost:~$ beet ls supergenre:Soundtrack user@beethost:~$ beet ls -a supergenre:Soundtrack ... Michael Nyman - The Piano ... user@beethost:~$ ls -l /music/Soundtrack/N/Michael\ Nyman/ total 0 drwxrwxr-x 2 user user 0 Jan 26 08:50 [1993] The Piano``` Note that the path follows the default template and that the album attribute expands correctly in the template. ### Setup beets version 1.4.6 Python version 2.7.13 Relevant thread at discussion board: https://discourse.beets.io/t/path-config-and-album-level-flexible-ttributes-at-import-time/278/3
beetbox/beets
diff --git a/test/test_dbcore.py b/test/test_dbcore.py index 0d40896da..1dd2284c6 100644 --- a/test/test_dbcore.py +++ b/test/test_dbcore.py @@ -225,6 +225,31 @@ class MigrationTest(unittest.TestCase): self.fail("select failed") +class TransactionTest(unittest.TestCase): + def setUp(self): + self.db = DatabaseFixture1(':memory:') + + def tearDown(self): + self.db._connection().close() + + def test_mutate_increase_revision(self): + old_rev = self.db.revision + with self.db.transaction() as tx: + tx.mutate( + 'INSERT INTO {0} ' + '(field_one) ' + 'VALUES (?);'.format(ModelFixture1._table), + (111,), + ) + self.assertGreater(self.db.revision, old_rev) + + def test_query_no_increase_revision(self): + old_rev = self.db.revision + with self.db.transaction() as tx: + tx.query('PRAGMA table_info(%s)' % ModelFixture1._table) + self.assertEqual(self.db.revision, old_rev) + + class ModelTest(unittest.TestCase): def setUp(self): self.db = DatabaseFixture1(':memory:') @@ -246,6 +271,30 @@ class ModelTest(unittest.TestCase): row = self.db._connection().execute('select * from test').fetchone() self.assertEqual(row['field_one'], 123) + def test_revision(self): + old_rev = self.db.revision + model = ModelFixture1() + model.add(self.db) + model.store() + self.assertEqual(model._revision, self.db.revision) + self.assertGreater(self.db.revision, old_rev) + + mid_rev = self.db.revision + model2 = ModelFixture1() + model2.add(self.db) + model2.store() + self.assertGreater(model2._revision, mid_rev) + self.assertGreater(self.db.revision, model._revision) + + # revision changed, so the model should be re-loaded + model.load() + self.assertEqual(model._revision, self.db.revision) + + # revision did not change, so no reload + mod2_old_rev = model2._revision + model2.load() + self.assertEqual(model2._revision, mod2_old_rev) + def test_retrieve_by_id(self): model = ModelFixture1() model.add(self.db) diff --git a/test/test_ipfs.py b/test/test_ipfs.py index d670bfc25..2fe89e7e5 100644 --- 
a/test/test_ipfs.py +++ b/test/test_ipfs.py @@ -49,7 +49,7 @@ class IPFSPluginTest(unittest.TestCase, TestHelper): want_item = test_album.items()[2] for check_item in added_album.items(): try: - if check_item.ipfs: + if check_item.get('ipfs', with_album=False): ipfs_item = os.path.basename(want_item.path).decode( _fsencoding(), ) @@ -57,7 +57,8 @@ class IPFSPluginTest(unittest.TestCase, TestHelper): ipfs_item) want_path = bytestring_path(want_path) self.assertEqual(check_item.path, want_path) - self.assertEqual(check_item.ipfs, want_item.ipfs) + self.assertEqual(check_item.get('ipfs', with_album=False), + want_item.ipfs) self.assertEqual(check_item.title, want_item.title) found = True except AttributeError: diff --git a/test/test_library.py b/test/test_library.py index 4e3be878c..51171b1f8 100644 --- a/test/test_library.py +++ b/test/test_library.py @@ -132,6 +132,21 @@ class GetSetTest(_common.TestCase): def test_invalid_field_raises_attributeerror(self): self.assertRaises(AttributeError, getattr, self.i, u'xyzzy') + def test_album_fallback(self): + # integration test of item-album fallback + lib = beets.library.Library(':memory:') + i = item(lib) + album = lib.add_album([i]) + album['flex'] = u'foo' + album.store() + + self.assertTrue('flex' in i) + self.assertFalse('flex' in i.keys(with_album=False)) + self.assertEqual(i['flex'], u'foo') + self.assertEqual(i.get('flex'), u'foo') + self.assertEqual(i.get('flex', with_album=False), None) + self.assertEqual(i.get('flexx'), None) + class DestinationTest(_common.TestCase): def setUp(self): @@ -491,6 +506,24 @@ class DestinationTest(_common.TestCase): dest = self.i.destination() self.assertEqual(dest[-2:], b'XX') + def test_album_field_query(self): + self.lib.directory = b'one' + self.lib.path_formats = [(u'default', u'two'), + (u'flex:foo', u'three')] + album = self.lib.add_album([self.i]) + self.assertEqual(self.i.destination(), np('one/two')) + album['flex'] = u'foo' + album.store() + 
self.assertEqual(self.i.destination(), np('one/three')) + + def test_album_field_in_template(self): + self.lib.directory = b'one' + self.lib.path_formats = [(u'default', u'$flex/two')] + album = self.lib.add_album([self.i]) + album['flex'] = u'foo' + album.store() + self.assertEqual(self.i.destination(), np('one/foo/two')) + class ItemFormattedMappingTest(_common.LibTestCase): def test_formatted_item_value(self): diff --git a/test/test_query.py b/test/test_query.py index f88a12c92..4017ff44b 100644 --- a/test/test_query.py +++ b/test/test_query.py @@ -109,7 +109,7 @@ class DummyDataTestCase(_common.TestCase, AssertsMixin): items[2].comp = False for item in items: self.lib.add(item) - self.lib.add_album(items[:2]) + self.album = self.lib.add_album(items[:2]) def assert_items_matched_all(self, results): self.assert_items_matched(results, [ @@ -300,6 +300,17 @@ class GetTest(DummyDataTestCase): results = self.lib.items(q) self.assertFalse(results) + def test_album_field_fallback(self): + self.album['albumflex'] = u'foo' + self.album.store() + + q = u'albumflex:foo' + results = self.lib.items(q) + self.assert_items_matched(results, [ + u'foo bar', + u'baz qux', + ]) + def test_invalid_query(self): with self.assertRaises(InvalidQueryArgumentValueError) as raised: dbcore.query.NumericQuery('year', u'199a')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 6 }
1.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 beautifulsoup4==4.13.3 -e git+https://github.com/beetbox/beets.git@66443169ad17f1e062620403f45eae7200e633a9#egg=beets blinker==1.9.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 click==8.1.8 confuse==2.0.1 coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work execnet==2.1.1 filetype==1.2.0 Flask==3.1.0 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work itsdangerous==2.2.0 jellyfish==1.2.0 Jinja2==3.1.6 MarkupSafe==3.0.2 mediafile==0.13.0 mock==5.2.0 munkres==1.1.4 musicbrainzngs==0.7.1 mutagen==1.47.0 oauthlib==3.2.2 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pycparser==2.22 pylast==5.5.0 pytest @ file:///croot/pytest_1738938843180/work pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 python-mpd2==3.1.1 python3-discogs-client==2.8 pyxdg==0.28 PyYAML==6.0.2 rarfile==4.2 reflink==0.2.2 requests==2.32.3 requests-oauthlib==2.0.0 responses==0.25.7 six==1.17.0 sniffio==1.3.1 soupsieve==2.6 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0 Unidecode==1.3.8 urllib3==2.3.0 Werkzeug==3.1.3 zipp==3.21.0
name: beets channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - beautifulsoup4==4.13.3 - blinker==1.9.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - click==8.1.8 - confuse==2.0.1 - coverage==7.8.0 - execnet==2.1.1 - filetype==1.2.0 - flask==3.1.0 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - itsdangerous==2.2.0 - jellyfish==1.2.0 - jinja2==3.1.6 - markupsafe==3.0.2 - mediafile==0.13.0 - mock==5.2.0 - munkres==1.1.4 - musicbrainzngs==0.7.1 - mutagen==1.47.0 - oauthlib==3.2.2 - pycparser==2.22 - pylast==5.5.0 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - python-mpd2==3.1.1 - python3-discogs-client==2.8 - pyxdg==0.28 - pyyaml==6.0.2 - rarfile==4.2 - reflink==0.2.2 - requests==2.32.3 - requests-oauthlib==2.0.0 - responses==0.25.7 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - typing-extensions==4.13.0 - unidecode==1.3.8 - urllib3==2.3.0 - werkzeug==3.1.3 - zipp==3.21.0 prefix: /opt/conda/envs/beets
[ "test/test_dbcore.py::TransactionTest::test_mutate_increase_revision", "test/test_dbcore.py::TransactionTest::test_query_no_increase_revision", "test/test_dbcore.py::ModelTest::test_revision", "test/test_ipfs.py::IPFSPluginTest::test_stored_hashes", "test/test_library.py::GetSetTest::test_album_fallback", "test/test_library.py::DestinationTest::test_album_field_query", "test/test_query.py::GetTest::test_album_field_fallback" ]
[ "test/test_library.py::DestinationTest::test_asciify_character_expanding_to_slash", "test/test_library.py::DestinationFunctionTest::test_asciify_variable", "test/test_library.py::WriteTest::test_no_write_permission" ]
[ "test/test_dbcore.py::MigrationTest::test_extra_model_adds_table", "test/test_dbcore.py::MigrationTest::test_open_with_fewer_fields_leaves_untouched", "test/test_dbcore.py::MigrationTest::test_open_with_multiple_new_fields", "test/test_dbcore.py::MigrationTest::test_open_with_new_field_adds_column", "test/test_dbcore.py::MigrationTest::test_open_with_same_fields_leaves_untouched", "test/test_dbcore.py::ModelTest::test_add_model", "test/test_dbcore.py::ModelTest::test_check_db_fails", "test/test_dbcore.py::ModelTest::test_computed_field", "test/test_dbcore.py::ModelTest::test_delete_fixed_attribute", "test/test_dbcore.py::ModelTest::test_delete_flexattr", "test/test_dbcore.py::ModelTest::test_delete_flexattr_persists", "test/test_dbcore.py::ModelTest::test_delete_flexattr_via_dot", "test/test_dbcore.py::ModelTest::test_delete_internal_field", "test/test_dbcore.py::ModelTest::test_delete_non_existent_attribute", "test/test_dbcore.py::ModelTest::test_items", "test/test_dbcore.py::ModelTest::test_load_deleted_flex_field", "test/test_dbcore.py::ModelTest::test_missing_field", "test/test_dbcore.py::ModelTest::test_normalization_for_typed_flex_fields", "test/test_dbcore.py::ModelTest::test_null_value_normalization_by_type", "test/test_dbcore.py::ModelTest::test_null_value_stays_none_for_untyped_field", "test/test_dbcore.py::ModelTest::test_parse_nonstring", "test/test_dbcore.py::ModelTest::test_retrieve_by_id", "test/test_dbcore.py::ModelTest::test_store_and_retrieve_flexattr", "test/test_dbcore.py::ModelTest::test_store_fixed_field", "test/test_dbcore.py::FormatTest::test_format_fixed_field_integer", "test/test_dbcore.py::FormatTest::test_format_fixed_field_integer_normalized", "test/test_dbcore.py::FormatTest::test_format_fixed_field_string", "test/test_dbcore.py::FormatTest::test_format_flex_field", "test/test_dbcore.py::FormatTest::test_format_flex_field_bytes", "test/test_dbcore.py::FormatTest::test_format_typed_flex_field", 
"test/test_dbcore.py::FormatTest::test_format_unset_field", "test/test_dbcore.py::FormattedMappingTest::test_get_method_with_default", "test/test_dbcore.py::FormattedMappingTest::test_get_method_with_specified_default", "test/test_dbcore.py::FormattedMappingTest::test_get_unset_field", "test/test_dbcore.py::FormattedMappingTest::test_keys_equal_model_keys", "test/test_dbcore.py::ParseTest::test_parse_fixed_field", "test/test_dbcore.py::ParseTest::test_parse_flex_field", "test/test_dbcore.py::ParseTest::test_parse_untyped_field", "test/test_dbcore.py::QueryParseTest::test_colon_at_end", "test/test_dbcore.py::QueryParseTest::test_empty_query_part", "test/test_dbcore.py::QueryParseTest::test_escaped_colon", "test/test_dbcore.py::QueryParseTest::test_escaped_colon_in_regexp", "test/test_dbcore.py::QueryParseTest::test_keyed_regexp", "test/test_dbcore.py::QueryParseTest::test_multiple_years", "test/test_dbcore.py::QueryParseTest::test_one_basic_regexp", "test/test_dbcore.py::QueryParseTest::test_one_basic_term", "test/test_dbcore.py::QueryParseTest::test_one_keyed_term", "test/test_dbcore.py::QueryParseTest::test_single_year", "test/test_dbcore.py::QueryFromStringsTest::test_empty_query_part", "test/test_dbcore.py::QueryFromStringsTest::test_parse_fixed_type_query", "test/test_dbcore.py::QueryFromStringsTest::test_parse_flex_type_query", "test/test_dbcore.py::QueryFromStringsTest::test_parse_named_query", "test/test_dbcore.py::QueryFromStringsTest::test_two_parts", "test/test_dbcore.py::QueryFromStringsTest::test_zero_parts", "test/test_dbcore.py::SortFromStringsTest::test_fixed_field_sort", "test/test_dbcore.py::SortFromStringsTest::test_flex_field_sort", "test/test_dbcore.py::SortFromStringsTest::test_one_parts", "test/test_dbcore.py::SortFromStringsTest::test_special_sort", "test/test_dbcore.py::SortFromStringsTest::test_two_parts", "test/test_dbcore.py::SortFromStringsTest::test_zero_parts", "test/test_dbcore.py::ParseSortedQueryTest::test_and_query", 
"test/test_dbcore.py::ParseSortedQueryTest::test_leading_comma_or_query", "test/test_dbcore.py::ParseSortedQueryTest::test_no_space_before_comma_or_query", "test/test_dbcore.py::ParseSortedQueryTest::test_no_spaces_or_query", "test/test_dbcore.py::ParseSortedQueryTest::test_only_direction", "test/test_dbcore.py::ParseSortedQueryTest::test_or_query", "test/test_dbcore.py::ParseSortedQueryTest::test_trailing_comma_or_query", "test/test_dbcore.py::ResultsIteratorTest::test_concurrent_iterators", "test/test_dbcore.py::ResultsIteratorTest::test_iterate_once", "test/test_dbcore.py::ResultsIteratorTest::test_iterate_slow_sort", "test/test_dbcore.py::ResultsIteratorTest::test_iterate_twice", "test/test_dbcore.py::ResultsIteratorTest::test_length", "test/test_dbcore.py::ResultsIteratorTest::test_no_results", "test/test_dbcore.py::ResultsIteratorTest::test_out_of_range", "test/test_dbcore.py::ResultsIteratorTest::test_slow_query", "test/test_dbcore.py::ResultsIteratorTest::test_slow_query_negative", "test/test_dbcore.py::ResultsIteratorTest::test_slow_sort_subscript", "test/test_dbcore.py::ResultsIteratorTest::test_unsorted_subscript", "test/test_library.py::LoadTest::test_load_clears_dirty_flags", "test/test_library.py::LoadTest::test_load_restores_data_from_db", "test/test_library.py::StoreTest::test_store_changes_database_value", "test/test_library.py::StoreTest::test_store_clears_dirty_flags", "test/test_library.py::StoreTest::test_store_only_writes_dirty_fields", "test/test_library.py::AddTest::test_item_add_inserts_row", "test/test_library.py::AddTest::test_library_add_path_inserts_row", "test/test_library.py::RemoveTest::test_remove_deletes_from_db", "test/test_library.py::GetSetTest::test_invalid_field_raises_attributeerror", "test/test_library.py::GetSetTest::test_set_changes_value", "test/test_library.py::GetSetTest::test_set_does_not_dirty_if_value_unchanged", "test/test_library.py::GetSetTest::test_set_sets_dirty_flag", 
"test/test_library.py::DestinationTest::test_album_field_in_template", "test/test_library.py::DestinationTest::test_albumartist_falls_back_to_artist", "test/test_library.py::DestinationTest::test_albumartist_overrides_artist", "test/test_library.py::DestinationTest::test_albumtype_path_fallback_to_comp", "test/test_library.py::DestinationTest::test_albumtype_query_path", "test/test_library.py::DestinationTest::test_artist_falls_back_to_albumartist", "test/test_library.py::DestinationTest::test_artist_overrides_albumartist", "test/test_library.py::DestinationTest::test_asciify_and_replace", "test/test_library.py::DestinationTest::test_comp_before_singleton_path", "test/test_library.py::DestinationTest::test_comp_path", "test/test_library.py::DestinationTest::test_default_path_for_non_compilations", "test/test_library.py::DestinationTest::test_destination_escapes_leading_dot", "test/test_library.py::DestinationTest::test_destination_escapes_slashes", "test/test_library.py::DestinationTest::test_destination_long_names_keep_extension", "test/test_library.py::DestinationTest::test_destination_long_names_truncated", "test/test_library.py::DestinationTest::test_destination_pads_date_values", "test/test_library.py::DestinationTest::test_destination_pads_some_indices", "test/test_library.py::DestinationTest::test_destination_preserves_extension", "test/test_library.py::DestinationTest::test_destination_preserves_legitimate_slashes", "test/test_library.py::DestinationTest::test_destination_substitutes_metadata_values", "test/test_library.py::DestinationTest::test_destination_with_replacements", "test/test_library.py::DestinationTest::test_directory_works_with_trailing_slash", "test/test_library.py::DestinationTest::test_directory_works_without_trailing_slash", "test/test_library.py::DestinationTest::test_distination_windows_removes_both_separators", "test/test_library.py::DestinationTest::test_get_formatted_datetime", 
"test/test_library.py::DestinationTest::test_get_formatted_does_not_replace_separators", "test/test_library.py::DestinationTest::test_get_formatted_none", "test/test_library.py::DestinationTest::test_get_formatted_pads_with_zero", "test/test_library.py::DestinationTest::test_get_formatted_uses_kbps_bitrate", "test/test_library.py::DestinationTest::test_get_formatted_uses_khz_samplerate", "test/test_library.py::DestinationTest::test_heterogeneous_album_gets_single_directory", "test/test_library.py::DestinationTest::test_legalize_path_one_for_many_replacement", "test/test_library.py::DestinationTest::test_legalize_path_one_for_one_replacement", "test/test_library.py::DestinationTest::test_lower_case_extension", "test/test_library.py::DestinationTest::test_non_mbcs_characters_on_windows", "test/test_library.py::DestinationTest::test_path_with_format", "test/test_library.py::DestinationTest::test_singleton_path", "test/test_library.py::DestinationTest::test_unicode_extension_in_fragment", "test/test_library.py::DestinationTest::test_unicode_normalized_nfc_on_linux", "test/test_library.py::DestinationTest::test_unicode_normalized_nfd_on_mac", "test/test_library.py::ItemFormattedMappingTest::test_album_field_overrides_item_field_for_path", "test/test_library.py::ItemFormattedMappingTest::test_album_flex_field", "test/test_library.py::ItemFormattedMappingTest::test_albumartist_falls_back_to_artist", "test/test_library.py::ItemFormattedMappingTest::test_artist_falls_back_to_albumartist", "test/test_library.py::ItemFormattedMappingTest::test_both_artist_and_albumartist_empty", "test/test_library.py::ItemFormattedMappingTest::test_formatted_item_value", "test/test_library.py::ItemFormattedMappingTest::test_get_method_with_default", "test/test_library.py::ItemFormattedMappingTest::test_get_method_with_specified_default", "test/test_library.py::ItemFormattedMappingTest::test_get_unset_field", "test/test_library.py::ItemFormattedMappingTest::test_item_precedence", 
"test/test_library.py::DestinationFunctionTest::test_first", "test/test_library.py::DestinationFunctionTest::test_first_different_sep", "test/test_library.py::DestinationFunctionTest::test_first_skip", "test/test_library.py::DestinationFunctionTest::test_if_def_false_complete", "test/test_library.py::DestinationFunctionTest::test_if_def_field_not_defined", "test/test_library.py::DestinationFunctionTest::test_if_def_field_not_defined_2", "test/test_library.py::DestinationFunctionTest::test_if_def_field_return_self", "test/test_library.py::DestinationFunctionTest::test_if_def_true", "test/test_library.py::DestinationFunctionTest::test_if_def_true_complete", "test/test_library.py::DestinationFunctionTest::test_if_else_false", "test/test_library.py::DestinationFunctionTest::test_if_else_false_value", "test/test_library.py::DestinationFunctionTest::test_if_false", "test/test_library.py::DestinationFunctionTest::test_if_false_value", "test/test_library.py::DestinationFunctionTest::test_if_int_value", "test/test_library.py::DestinationFunctionTest::test_if_true", "test/test_library.py::DestinationFunctionTest::test_left_variable", "test/test_library.py::DestinationFunctionTest::test_nonexistent_function", "test/test_library.py::DestinationFunctionTest::test_right_variable", "test/test_library.py::DestinationFunctionTest::test_title_case_variable", "test/test_library.py::DestinationFunctionTest::test_title_case_variable_aphostrophe", "test/test_library.py::DestinationFunctionTest::test_upper_case_literal", "test/test_library.py::DestinationFunctionTest::test_upper_case_variable", "test/test_library.py::DisambiguationTest::test_change_brackets", "test/test_library.py::DisambiguationTest::test_drop_empty_disambig_string", "test/test_library.py::DisambiguationTest::test_remove_brackets", "test/test_library.py::DisambiguationTest::test_unique_expands_to_disambiguating_year", "test/test_library.py::DisambiguationTest::test_unique_expands_to_nothing_for_distinct_albums", 
"test/test_library.py::DisambiguationTest::test_unique_falls_back_to_second_distinguishing_field", "test/test_library.py::DisambiguationTest::test_unique_sanitized", "test/test_library.py::DisambiguationTest::test_unique_with_default_arguments_uses_albumtype", "test/test_library.py::DisambiguationTest::test_use_fallback_numbers_when_identical", "test/test_library.py::PluginDestinationTest::test_plugin_value_not_substituted", "test/test_library.py::PluginDestinationTest::test_plugin_value_overrides_attribute", "test/test_library.py::PluginDestinationTest::test_plugin_value_sanitized", "test/test_library.py::PluginDestinationTest::test_undefined_value_not_substituted", "test/test_library.py::AlbumInfoTest::test_album_items_consistent", "test/test_library.py::AlbumInfoTest::test_albuminfo_change_albumartist_changes_items", "test/test_library.py::AlbumInfoTest::test_albuminfo_change_artist_does_not_change_items", "test/test_library.py::AlbumInfoTest::test_albuminfo_changes_affect_items", "test/test_library.py::AlbumInfoTest::test_albuminfo_for_two_items_doesnt_duplicate_row", "test/test_library.py::AlbumInfoTest::test_albuminfo_reflects_metadata", "test/test_library.py::AlbumInfoTest::test_albuminfo_remove_removes_items", "test/test_library.py::AlbumInfoTest::test_albuminfo_stores_art", "test/test_library.py::AlbumInfoTest::test_get_album_by_id", "test/test_library.py::AlbumInfoTest::test_individual_tracks_have_no_albuminfo", "test/test_library.py::AlbumInfoTest::test_noop_albuminfo_changes_affect_items", "test/test_library.py::AlbumInfoTest::test_removing_last_item_removes_album", "test/test_library.py::ArtDestinationTest::test_art_filename_respects_setting", "test/test_library.py::ArtDestinationTest::test_art_path_in_item_dir", "test/test_library.py::ArtDestinationTest::test_art_path_sanitized", "test/test_library.py::PathStringTest::test_art_destination_returns_bytestring", "test/test_library.py::PathStringTest::test_artpath_stores_special_chars", 
"test/test_library.py::PathStringTest::test_destination_returns_bytestring", "test/test_library.py::PathStringTest::test_fetched_item_path_is_bytestring", "test/test_library.py::PathStringTest::test_item_path_is_bytestring", "test/test_library.py::PathStringTest::test_sanitize_path_returns_unicode", "test/test_library.py::PathStringTest::test_sanitize_path_with_special_chars", "test/test_library.py::PathStringTest::test_special_char_path_added_to_database", "test/test_library.py::PathStringTest::test_special_chars_preserved_in_database", "test/test_library.py::PathStringTest::test_unicode_artpath_becomes_bytestring", "test/test_library.py::PathStringTest::test_unicode_artpath_in_database_decoded", "test/test_library.py::PathStringTest::test_unicode_in_database_becomes_bytestring", "test/test_library.py::PathStringTest::test_unicode_path_becomes_bytestring", "test/test_library.py::MtimeTest::test_mtime_initially_up_to_date", "test/test_library.py::MtimeTest::test_mtime_reset_on_db_modify", "test/test_library.py::MtimeTest::test_mtime_up_to_date_after_read", "test/test_library.py::MtimeTest::test_mtime_up_to_date_after_write", "test/test_library.py::ImportTimeTest::test_atime_for_singleton", "test/test_library.py::TemplateTest::test_album_and_item_format", "test/test_library.py::TemplateTest::test_album_flexattr_appears_in_item_template", "test/test_library.py::TemplateTest::test_year_formatted_in_template", "test/test_library.py::UnicodePathTest::test_unicode_path", "test/test_library.py::WriteTest::test_write_custom_tags", "test/test_library.py::WriteTest::test_write_date_field", "test/test_library.py::WriteTest::test_write_nonexistant", "test/test_library.py::WriteTest::test_write_with_custom_path", "test/test_library.py::ItemReadTest::test_nonexistent_raise_read_error", "test/test_library.py::ItemReadTest::test_unreadable_raise_read_error", "test/test_library.py::FilesizeTest::test_filesize", "test/test_library.py::FilesizeTest::test_nonexistent_file", 
"test/test_library.py::ParseQueryTest::test_parse_bytes", "test/test_library.py::ParseQueryTest::test_parse_invalid_query_string", "test/test_library.py::LibraryFieldTypesTest::test_datetype", "test/test_library.py::LibraryFieldTypesTest::test_durationtype", "test/test_library.py::LibraryFieldTypesTest::test_musicalkey", "test/test_library.py::LibraryFieldTypesTest::test_pathtype", "test/test_query.py::AnyFieldQueryTest::test_eq", "test/test_query.py::AnyFieldQueryTest::test_no_restriction", "test/test_query.py::AnyFieldQueryTest::test_restriction_completeness", "test/test_query.py::AnyFieldQueryTest::test_restriction_soundness", "test/test_query.py::GetTest::test_compilation_false", "test/test_query.py::GetTest::test_compilation_true", "test/test_query.py::GetTest::test_get_empty", "test/test_query.py::GetTest::test_get_no_matches", "test/test_query.py::GetTest::test_get_none", "test/test_query.py::GetTest::test_get_one_keyed_regexp", "test/test_query.py::GetTest::test_get_one_keyed_term", "test/test_query.py::GetTest::test_get_one_unkeyed_regexp", "test/test_query.py::GetTest::test_get_one_unkeyed_term", "test/test_query.py::GetTest::test_invalid_key", "test/test_query.py::GetTest::test_invalid_query", "test/test_query.py::GetTest::test_item_field_name_matches_nothing_in_album_query", "test/test_query.py::GetTest::test_key_case_insensitive", "test/test_query.py::GetTest::test_keyed_regexp_matches_only_one_column", "test/test_query.py::GetTest::test_keyed_term_matches_only_one_column", "test/test_query.py::GetTest::test_mixed_terms_regexps_narrow_search", "test/test_query.py::GetTest::test_multiple_regexps_narrow_search", "test/test_query.py::GetTest::test_multiple_terms_narrow_search", "test/test_query.py::GetTest::test_numeric_search_negative", "test/test_query.py::GetTest::test_numeric_search_positive", "test/test_query.py::GetTest::test_regexp_case_sensitive", "test/test_query.py::GetTest::test_single_year", "test/test_query.py::GetTest::test_singleton_false", 
"test/test_query.py::GetTest::test_singleton_true", "test/test_query.py::GetTest::test_term_case_insensitive", "test/test_query.py::GetTest::test_term_case_insensitive_with_key", "test/test_query.py::GetTest::test_unicode_query", "test/test_query.py::GetTest::test_unkeyed_regexp_matches_multiple_columns", "test/test_query.py::GetTest::test_unkeyed_term_matches_multiple_columns", "test/test_query.py::GetTest::test_unknown_field_name_no_results", "test/test_query.py::GetTest::test_unknown_field_name_no_results_in_album_query", "test/test_query.py::GetTest::test_year_range", "test/test_query.py::MatchTest::test_bitrate_range_negative", "test/test_query.py::MatchTest::test_bitrate_range_positive", "test/test_query.py::MatchTest::test_eq", "test/test_query.py::MatchTest::test_open_range", "test/test_query.py::MatchTest::test_regex_match_negative", "test/test_query.py::MatchTest::test_regex_match_non_string_value", "test/test_query.py::MatchTest::test_regex_match_positive", "test/test_query.py::MatchTest::test_substring_match_negative", "test/test_query.py::MatchTest::test_substring_match_non_string_value", "test/test_query.py::MatchTest::test_substring_match_positive", "test/test_query.py::MatchTest::test_year_match_negative", "test/test_query.py::MatchTest::test_year_match_positive", "test/test_query.py::PathQueryTest::test_case_sensitivity", "test/test_query.py::PathQueryTest::test_detect_absolute_path", "test/test_query.py::PathQueryTest::test_detect_relative_path", "test/test_query.py::PathQueryTest::test_escape_backslash", "test/test_query.py::PathQueryTest::test_escape_percent", "test/test_query.py::PathQueryTest::test_escape_underscore", "test/test_query.py::PathQueryTest::test_fragment_no_match", "test/test_query.py::PathQueryTest::test_no_match", "test/test_query.py::PathQueryTest::test_non_slashed_does_not_match_path", "test/test_query.py::PathQueryTest::test_nonnorm_path", "test/test_query.py::PathQueryTest::test_parent_directory_no_slash", 
"test/test_query.py::PathQueryTest::test_parent_directory_with_slash", "test/test_query.py::PathQueryTest::test_path_album_regex", "test/test_query.py::PathQueryTest::test_path_exact_match", "test/test_query.py::PathQueryTest::test_path_item_regex", "test/test_query.py::PathQueryTest::test_path_sep_detection", "test/test_query.py::PathQueryTest::test_slashed_query_matches_path", "test/test_query.py::PathQueryTest::test_slashes_in_explicit_field_does_not_match_path", "test/test_query.py::IntQueryTest::test_exact_value_match", "test/test_query.py::IntQueryTest::test_flex_dont_match_missing", "test/test_query.py::IntQueryTest::test_flex_range_match", "test/test_query.py::IntQueryTest::test_no_substring_match", "test/test_query.py::IntQueryTest::test_range_match", "test/test_query.py::BoolQueryTest::test_flex_parse_0", "test/test_query.py::BoolQueryTest::test_flex_parse_1", "test/test_query.py::BoolQueryTest::test_flex_parse_any_string", "test/test_query.py::BoolQueryTest::test_flex_parse_false", "test/test_query.py::BoolQueryTest::test_flex_parse_true", "test/test_query.py::BoolQueryTest::test_parse_true", "test/test_query.py::DefaultSearchFieldsTest::test_albums_matches_album", "test/test_query.py::DefaultSearchFieldsTest::test_albums_matches_albumartist", "test/test_query.py::DefaultSearchFieldsTest::test_items_does_not_match_year", "test/test_query.py::DefaultSearchFieldsTest::test_items_matches_title", "test/test_query.py::NoneQueryTest::test_match_after_set_none", "test/test_query.py::NoneQueryTest::test_match_singletons", "test/test_query.py::NoneQueryTest::test_match_slow", "test/test_query.py::NoneQueryTest::test_match_slow_after_set_none", "test/test_query.py::NotQueryMatchTest::test_bitrate_range_negative", "test/test_query.py::NotQueryMatchTest::test_bitrate_range_positive", "test/test_query.py::NotQueryMatchTest::test_open_range", "test/test_query.py::NotQueryMatchTest::test_regex_match_negative", 
"test/test_query.py::NotQueryMatchTest::test_regex_match_non_string_value", "test/test_query.py::NotQueryMatchTest::test_regex_match_positive", "test/test_query.py::NotQueryMatchTest::test_substring_match_negative", "test/test_query.py::NotQueryMatchTest::test_substring_match_non_string_value", "test/test_query.py::NotQueryMatchTest::test_substring_match_positive", "test/test_query.py::NotQueryMatchTest::test_year_match_negative", "test/test_query.py::NotQueryMatchTest::test_year_match_positive", "test/test_query.py::NotQueryTest::test_fast_vs_slow", "test/test_query.py::NotQueryTest::test_get_mixed_terms", "test/test_query.py::NotQueryTest::test_get_multiple_terms", "test/test_query.py::NotQueryTest::test_get_one_keyed_regexp", "test/test_query.py::NotQueryTest::test_get_one_unkeyed_regexp", "test/test_query.py::NotQueryTest::test_get_prefixes_keyed", "test/test_query.py::NotQueryTest::test_get_prefixes_unkeyed", "test/test_query.py::NotQueryTest::test_type_and", "test/test_query.py::NotQueryTest::test_type_anyfield", "test/test_query.py::NotQueryTest::test_type_boolean", "test/test_query.py::NotQueryTest::test_type_date", "test/test_query.py::NotQueryTest::test_type_false", "test/test_query.py::NotQueryTest::test_type_match", "test/test_query.py::NotQueryTest::test_type_none", "test/test_query.py::NotQueryTest::test_type_numeric", "test/test_query.py::NotQueryTest::test_type_or", "test/test_query.py::NotQueryTest::test_type_regexp", "test/test_query.py::NotQueryTest::test_type_substring", "test/test_query.py::NotQueryTest::test_type_true" ]
[]
MIT License
2,795
[ "docs/changelog.rst", "beets/importer.py", "beets/ui/__init__.py", "beets/library.py", "beetsplug/convert.py", "beets/dbcore/db.py" ]
[ "docs/changelog.rst", "beets/importer.py", "beets/ui/__init__.py", "beets/library.py", "beetsplug/convert.py", "beets/dbcore/db.py" ]
pre-commit__pre-commit-803
f2da2c435c1123c4edc4ca9701c245cc25b0a50d
2018-07-20 01:49:48
cf691e85c89dbe16dce7e0a729649b2e19d4d9ad
diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py index b1549d4..dbf5641 100644 --- a/pre_commit/commands/run.py +++ b/pre_commit/commands/run.py @@ -256,7 +256,7 @@ def run(runner, store, args, environ=os.environ): for _, hook in repo.hooks: if ( (not args.hook or hook['id'] == args.hook) and - not hook['stages'] or args.hook_stage in hook['stages'] + (not hook['stages'] or args.hook_stage in hook['stages']) ): repo_hooks.append((repo, hook))
`stages: [commit]` hooks will run with `pre-commit run otherhookid` minor logic bug, good new-contributor ticket Easy to reproduce on pre-commit itself: ```diff diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a146bd2..7bb382d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,6 +3,7 @@ repos: rev: v1.2.3 hooks: - id: trailing-whitespace + stages: [commit] - id: end-of-file-fixer - id: autopep8-wrapper - id: check-docstring-first ``` ```console $ pre-commit run end-of-file-fixer --all-files Trim Trailing Whitespace.................................................Passed Fix End of Files.........................................................Passed ``` (it should have only run `end-of-file-fixer` but also run `trailing-whitespace` due to a logic error).
pre-commit/pre-commit
diff --git a/tests/commands/run_test.py b/tests/commands/run_test.py index 70a6b6e..e6258d3 100644 --- a/tests/commands/run_test.py +++ b/tests/commands/run_test.py @@ -762,3 +762,34 @@ def test_include_exclude_does_search_instead_of_match(some_filenames): def test_include_exclude_exclude_removes_files(some_filenames): ret = _filter_by_include_exclude(some_filenames, '', r'\.py$') assert ret == ['.pre-commit-hooks.yaml'] + + +def test_args_hook_only(cap_out, store, repo_with_passing_hook): + config = OrderedDict(( + ('repo', 'local'), + ( + 'hooks', ( + OrderedDict(( + ('id', 'flake8'), + ('name', 'flake8'), + ('entry', "'{}' -m flake8".format(sys.executable)), + ('language', 'system'), + ('stages', ['commit']), + )), OrderedDict(( + ('id', 'do_not_commit'), + ('name', 'Block if "DO NOT COMMIT" is found'), + ('entry', 'DO NOT COMMIT'), + ('language', 'pygrep'), + )), + ), + ), + )) + add_config_to_repo(repo_with_passing_hook, config) + stage_a_file() + ret, printed = _do_run( + cap_out, + store, + repo_with_passing_hook, + run_opts(hook='do_not_commit'), + ) + assert b'flake8' not in printed
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
1.10
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "coverage", "pytest", "pytest-env" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aspy.yaml==1.3.0 attrs==22.2.0 cached-property==1.5.2 certifi==2021.5.30 cfgv==3.3.1 coverage==6.2 distlib==0.3.9 filelock==3.4.1 flake8==5.0.4 identify==2.4.4 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 mccabe==0.7.0 mock==5.2.0 nodeenv==1.6.0 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 -e git+https://github.com/pre-commit/pre-commit.git@f2da2c435c1123c4edc4ca9701c245cc25b0a50d#egg=pre_commit py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pyparsing==3.1.4 pytest==7.0.1 pytest-env==0.6.2 PyYAML==6.0.1 six==1.17.0 toml==0.10.2 tomli==1.2.3 typing_extensions==4.1.1 virtualenv==20.17.1 zipp==3.6.0
name: pre-commit channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aspy-yaml==1.3.0 - attrs==22.2.0 - cached-property==1.5.2 - cfgv==3.3.1 - coverage==6.2 - distlib==0.3.9 - filelock==3.4.1 - flake8==5.0.4 - identify==2.4.4 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - mccabe==0.7.0 - mock==5.2.0 - nodeenv==1.6.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-env==0.6.2 - pyyaml==6.0.1 - six==1.17.0 - toml==0.10.2 - tomli==1.2.3 - typing-extensions==4.1.1 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/pre-commit
[ "tests/commands/run_test.py::test_args_hook_only" ]
[]
[ "tests/commands/run_test.py::test_run_all_hooks_failing", "tests/commands/run_test.py::test_arbitrary_bytes_hook", "tests/commands/run_test.py::test_hook_that_modifies_but_returns_zero", "tests/commands/run_test.py::test_types_hook_repository", "tests/commands/run_test.py::test_exclude_types_hook_repository", "tests/commands/run_test.py::test_global_exclude", "tests/commands/run_test.py::test_show_diff_on_failure", "tests/commands/run_test.py::test_run[options0-outputs0-0-True]", "tests/commands/run_test.py::test_run[options1-outputs1-0-True]", "tests/commands/run_test.py::test_run[options2-outputs2-0-True]", "tests/commands/run_test.py::test_run[options3-outputs3-1-True]", "tests/commands/run_test.py::test_run[options4-outputs4-0-True]", "tests/commands/run_test.py::test_run[options5-outputs5-0-True]", "tests/commands/run_test.py::test_run[options6-outputs6-0-False]", "tests/commands/run_test.py::test_run_output_logfile", "tests/commands/run_test.py::test_always_run", "tests/commands/run_test.py::test_always_run_alt_config", "tests/commands/run_test.py::test_hook_verbose_enabled", "tests/commands/run_test.py::test_origin_source_error_msg_error[master-]", "tests/commands/run_test.py::test_origin_source_error_msg_error[-master]", "tests/commands/run_test.py::test_origin_source_both_ok", "tests/commands/run_test.py::test_has_unmerged_paths", "tests/commands/run_test.py::test_merge_conflict", "tests/commands/run_test.py::test_merge_conflict_modified", "tests/commands/run_test.py::test_merge_conflict_resolved", "tests/commands/run_test.py::test_compute_cols[hooks0-True-80]", "tests/commands/run_test.py::test_compute_cols[hooks1-False-81]", "tests/commands/run_test.py::test_compute_cols[hooks2-True-85]", "tests/commands/run_test.py::test_compute_cols[hooks3-False-82]", "tests/commands/run_test.py::test_get_skips[environ0-expected_output0]", "tests/commands/run_test.py::test_get_skips[environ1-expected_output1]", 
"tests/commands/run_test.py::test_get_skips[environ2-expected_output2]", "tests/commands/run_test.py::test_get_skips[environ3-expected_output3]", "tests/commands/run_test.py::test_get_skips[environ4-expected_output4]", "tests/commands/run_test.py::test_get_skips[environ5-expected_output5]", "tests/commands/run_test.py::test_get_skips[environ6-expected_output6]", "tests/commands/run_test.py::test_skip_hook", "tests/commands/run_test.py::test_hook_id_not_in_non_verbose_output", "tests/commands/run_test.py::test_hook_id_in_verbose_output", "tests/commands/run_test.py::test_multiple_hooks_same_id", "tests/commands/run_test.py::test_non_ascii_hook_id", "tests/commands/run_test.py::test_stdout_write_bug_py26", "tests/commands/run_test.py::test_lots_of_files", "tests/commands/run_test.py::test_stages", "tests/commands/run_test.py::test_commit_msg_hook", "tests/commands/run_test.py::test_local_hook_passes", "tests/commands/run_test.py::test_local_hook_fails", "tests/commands/run_test.py::test_pcre_deprecation_warning", "tests/commands/run_test.py::test_meta_hook_passes", "tests/commands/run_test.py::test_error_with_unstaged_config", "tests/commands/run_test.py::test_no_unstaged_error_with_all_files_or_files[opts0]", "tests/commands/run_test.py::test_no_unstaged_error_with_all_files_or_files[opts1]", "tests/commands/run_test.py::test_files_running_subdir", "tests/commands/run_test.py::test_pass_filenames[True-hook_args0-foo.py]", "tests/commands/run_test.py::test_pass_filenames[False-hook_args1-]", "tests/commands/run_test.py::test_pass_filenames[True-hook_args2-some", "tests/commands/run_test.py::test_pass_filenames[False-hook_args3-some", "tests/commands/run_test.py::test_fail_fast", "tests/commands/run_test.py::test_include_exclude_base_case", "tests/commands/run_test.py::test_matches_broken_symlink", "tests/commands/run_test.py::test_include_exclude_total_match", "tests/commands/run_test.py::test_include_exclude_does_search_instead_of_match", 
"tests/commands/run_test.py::test_include_exclude_exclude_removes_files" ]
[]
MIT License
2,796
[ "pre_commit/commands/run.py" ]
[ "pre_commit/commands/run.py" ]
hgrecco__pint-658
ba5cc0e76243c3e1e2b82999a71425ce3c6f3c1e
2018-07-20 01:55:48
bc754ae302b0c03d1802daddcd76c103a5fdfb67
diff --git a/docs/wrapping.rst b/docs/wrapping.rst index dd9a39f..6ce6411 100644 --- a/docs/wrapping.rst +++ b/docs/wrapping.rst @@ -246,3 +246,12 @@ In the decorator format: ... def pendulum_period(length): ... return 2*math.pi*math.sqrt(length/G) +If you just want to check the dimensionality of a quantity, you can do so with the built-in 'check' function. + +.. doctest:: + + >>> distance = 1 * ureg.m + >>> distance.check('[length]') + True + >>> distance.check('[time]') + False diff --git a/pint/quantity.py b/pint/quantity.py index 88bfdac..ef25509 100644 --- a/pint/quantity.py +++ b/pint/quantity.py @@ -294,6 +294,11 @@ class _Quantity(PrettyIPython, SharedRegistryObject): return self._dimensionality + def check(self, dimension): + """Return true if the quantity's dimension matches passed dimension. + """ + return self.dimensionality == dimension + @classmethod def from_tuple(cls, tup): return cls(tup[0], UnitsContainer(tup[1])) diff --git a/pint/registry_helpers.py b/pint/registry_helpers.py index 4a8b862..5ac0dbd 100644 --- a/pint/registry_helpers.py +++ b/pint/registry_helpers.py @@ -130,6 +130,20 @@ def _parse_wrap_args(args, registry=None): return _converter +def _apply_defaults(func, args, kwargs): + """Apply default keyword arguments. + + Named keywords may have been left blank. This function applies the default + values so that every argument is defined. + """ + + sig = signature(func) + bound_arguments = sig.bind(*args) + for param in sig.parameters.values(): + if param.name not in bound_arguments.arguments: + bound_arguments.arguments[param.name] = param.default + args = [bound_arguments.arguments[key] for key in sig.parameters.keys()] + return args, {} def wraps(ureg, ret, args, strict=True): """Wraps a function to become pint-aware. @@ -171,18 +185,7 @@ def wraps(ureg, ret, args, strict=True): @functools.wraps(func, assigned=assigned, updated=updated) def wrapper(*values, **kw): - - # Named keywords may have been left blank. 
Wherever the named keyword is blank, - # fill it in with the default value. - sig = signature(func) - bound_arguments = sig.bind(*values, **kw) - - for param in sig.parameters.values(): - if param.name not in bound_arguments.arguments: - bound_arguments.arguments[param.name] = param.default - - values = [bound_arguments.arguments[key] for key in sig.parameters.keys()] - kw = {} + values, kw = _apply_defaults(func, values, kw) # In principle, the values are used as is # When then extract the magnitudes when needed. @@ -228,13 +231,17 @@ def check(ureg, *args): @functools.wraps(func, assigned=assigned, updated=updated) def wrapper(*values, **kwargs): - for dim, value in zip_longest(dimensions, values): + values, kwargs = _apply_defaults(func, values, kwargs) + if len(dimensions) > len(values): + raise TypeError("%s takes %i parameters, but %i dimensions were passed" + % (func.__name__, len(values), len(dimensions))) + for dim, value in zip(dimensions, values): if dim is None: continue - val_dim = ureg.get_dimensionality(value) - if val_dim != dim: + if not ureg.Quantity(value).check(dim): + val_dim = ureg.get_dimensionality(value) raise DimensionalityError(value, 'a quantity of', val_dim, dim) return func(*values, **kwargs)
A check function that's not a decorator Is there a way we could implement a check function for stand-alone use, not just for wrapping functions? ```python >>> from pint import _DEFAULT_REGISTRY >>> ureg = _DEFAULT_REGISTRY >>> Q_ = ureg.Quantity >>> distance = Q_('1 meter') >>> time = Q_('1 second') >>> ureg.check(distance, '[length]') True >>> ureg.check(time, '[length]') False ```
hgrecco/pint
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py index 0cefce9..4dbcb98 100644 --- a/pint/testsuite/test_issues.py +++ b/pint/testsuite/test_issues.py @@ -625,3 +625,38 @@ class TestIssuesNP(QuantityTestCase): d2 = get_displacement(Q_(2, 's'), Q_(1, 'deg/s')) self.assertAlmostEqual(d2, Q_(2,' deg')) + + def test_issue655a(self): + ureg = UnitRegistry() + distance = 1 * ureg.m + time = 1 * ureg.s + velocity = distance / time + self.assertEqual(distance.check('[length]'), True) + self.assertEqual(distance.check('[time]'), False) + self.assertEqual(velocity.check('[length] / [time]'), True) + self.assertEqual(velocity.check('1 / [time] * [length]'), True) + + def test_issue(self): + import math + try: + from inspect import signature + except ImportError: + # Python2 does not have the inspect library. Import the backport + from funcsigs import signature + + ureg = UnitRegistry() + Q_ = ureg.Quantity + @ureg.check('[length]', '[length]/[time]^2') + def pendulum_period(length, G=Q_(1, 'standard_gravity')): + print(length) + return (2*math.pi*(length/G)**.5).to('s') + l = 1 * ureg.m + # Assume earth gravity + t = pendulum_period(l) + self.assertAlmostEqual(t, Q_('2.0064092925890407 second')) + # Use moon gravity + moon_gravity = Q_(1.625, 'm/s^2') + t = pendulum_period(l, moon_gravity) + self.assertAlmostEqual(t, Q_('4.928936075204336 second')) + + diff --git a/pint/testsuite/test_unit.py b/pint/testsuite/test_unit.py index 0e7b17f..b9e3b06 100644 --- a/pint/testsuite/test_unit.py +++ b/pint/testsuite/test_unit.py @@ -437,13 +437,13 @@ class TestRegistry(QuantityTestCase): g2 = ureg.check('[speed]')(gfunc) self.assertRaises(DimensionalityError, g2, 3.0, 1) - self.assertRaises(DimensionalityError, g2, 2 * ureg.parsec) + self.assertRaises(TypeError, g2, 2 * ureg.parsec) self.assertRaises(DimensionalityError, g2, 2 * ureg.parsec, 1.0) self.assertEqual(g2(2.0 * ureg.km / ureg.hour, 2), 1 * ureg.km / ureg.hour) g3 = ureg.check('[speed]', '[time]', 
'[mass]')(gfunc) - self.assertRaises(DimensionalityError, g3, 1 * ureg.parsec, 1 * ureg.angstrom) - self.assertRaises(DimensionalityError, g3, 1 * ureg.parsec, 1 * ureg.angstrom, 1 * ureg.kilogram) + self.assertRaises(TypeError, g3, 1 * ureg.parsec, 1 * ureg.angstrom) + self.assertRaises(TypeError, g3, 1 * ureg.parsec, 1 * ureg.angstrom, 1 * ureg.kilogram) def test_to_ref_vs_to(self): self.ureg.autoconvert_offset_to_baseunit = True
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 3 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work packaging @ file:///croot/packaging_1734472117206/work -e git+https://github.com/hgrecco/pint.git@ba5cc0e76243c3e1e2b82999a71425ce3c6f3c1e#egg=Pint pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: pint channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/pint
[ "pint/testsuite/test_unit.py::TestRegistry::test_check", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_check" ]
[]
[ "pint/testsuite/test_issues.py::TestIssues::test_alternative_angstrom_definition", "pint/testsuite/test_issues.py::TestIssues::test_angstrom_creation", "pint/testsuite/test_issues.py::TestIssues::test_issue104", "pint/testsuite/test_issues.py::TestIssues::test_issue105", "pint/testsuite/test_issues.py::TestIssues::test_issue121", "pint/testsuite/test_issues.py::TestIssues::test_issue170", "pint/testsuite/test_issues.py::TestIssues::test_issue29", "pint/testsuite/test_issues.py::TestIssues::test_issue52", "pint/testsuite/test_issues.py::TestIssues::test_issue523", "pint/testsuite/test_issues.py::TestIssues::test_issue54", "pint/testsuite/test_issues.py::TestIssues::test_issue54_related", "pint/testsuite/test_issues.py::TestIssues::test_issue61", "pint/testsuite/test_issues.py::TestIssues::test_issue61_notNP", "pint/testsuite/test_issues.py::TestIssues::test_issue66", "pint/testsuite/test_issues.py::TestIssues::test_issue66b", "pint/testsuite/test_issues.py::TestIssues::test_issue69", "pint/testsuite/test_issues.py::TestIssues::test_issue85", "pint/testsuite/test_issues.py::TestIssues::test_issue86", "pint/testsuite/test_issues.py::TestIssues::test_issue93", "pint/testsuite/test_issues.py::TestIssues::test_issues86b", "pint/testsuite/test_issues.py::TestIssues::test_micro_creation", "pint/testsuite/test_unit.py::TestUnit::test_creation", "pint/testsuite/test_unit.py::TestUnit::test_deepcopy", "pint/testsuite/test_unit.py::TestUnit::test_dimensionality", "pint/testsuite/test_unit.py::TestUnit::test_dimensionless", "pint/testsuite/test_unit.py::TestUnit::test_ipython", "pint/testsuite/test_unit.py::TestUnit::test_unit_casting", "pint/testsuite/test_unit.py::TestUnit::test_unit_cmp", "pint/testsuite/test_unit.py::TestUnit::test_unit_default_formatting", "pint/testsuite/test_unit.py::TestUnit::test_unit_div", "pint/testsuite/test_unit.py::TestUnit::test_unit_eqs", "pint/testsuite/test_unit.py::TestUnit::test_unit_formatting", 
"pint/testsuite/test_unit.py::TestUnit::test_unit_hash", "pint/testsuite/test_unit.py::TestUnit::test_unit_mul", "pint/testsuite/test_unit.py::TestUnit::test_unit_pow", "pint/testsuite/test_unit.py::TestUnit::test_unit_rdiv", "pint/testsuite/test_unit.py::TestUnit::test_unit_repr", "pint/testsuite/test_unit.py::TestRegistry::test_as_delta", "pint/testsuite/test_unit.py::TestRegistry::test_base", "pint/testsuite/test_unit.py::TestRegistry::test_convert_parse_str", "pint/testsuite/test_unit.py::TestRegistry::test_default_format", "pint/testsuite/test_unit.py::TestRegistry::test_define", "pint/testsuite/test_unit.py::TestRegistry::test_imperial_symbol", "pint/testsuite/test_unit.py::TestRegistry::test_load", "pint/testsuite/test_unit.py::TestRegistry::test_name", "pint/testsuite/test_unit.py::TestRegistry::test_parse_alias", "pint/testsuite/test_unit.py::TestRegistry::test_parse_complex", "pint/testsuite/test_unit.py::TestRegistry::test_parse_factor", "pint/testsuite/test_unit.py::TestRegistry::test_parse_mul_div", "pint/testsuite/test_unit.py::TestRegistry::test_parse_number", "pint/testsuite/test_unit.py::TestRegistry::test_parse_plural", "pint/testsuite/test_unit.py::TestRegistry::test_parse_prefix", "pint/testsuite/test_unit.py::TestRegistry::test_parse_pretty", "pint/testsuite/test_unit.py::TestRegistry::test_parse_single", "pint/testsuite/test_unit.py::TestRegistry::test_parse_units", "pint/testsuite/test_unit.py::TestRegistry::test_pint", "pint/testsuite/test_unit.py::TestRegistry::test_redefinition", "pint/testsuite/test_unit.py::TestRegistry::test_rep_and_parse", "pint/testsuite/test_unit.py::TestRegistry::test_repeated_convert", "pint/testsuite/test_unit.py::TestRegistry::test_singular_SI_prefix_convert", "pint/testsuite/test_unit.py::TestRegistry::test_str_errors", "pint/testsuite/test_unit.py::TestRegistry::test_symbol", "pint/testsuite/test_unit.py::TestRegistry::test_to_ref_vs_to", "pint/testsuite/test_unit.py::TestRegistry::test_wrap_referencing", 
"pint/testsuite/test_unit.py::TestRegistry::test_wraps", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_context_sp", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_get_base_units", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_get_compatible_units", "pint/testsuite/test_unit.py::TestCompatibleUnits::test_many", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_as_delta", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_base", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_convert_parse_str", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_default_format", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_define", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_imperial_symbol", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_lazy", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_load", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_name", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_alias", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_complex", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_factor", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_mul_div", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_number", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_plural", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_prefix", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_pretty", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_single", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_parse_units", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_pint", 
"pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_redefinition", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_rep_and_parse", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_repeated_convert", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_singular_SI_prefix_convert", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_str_errors", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_symbol", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_to_ref_vs_to", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_wrap_referencing", "pint/testsuite/test_unit.py::TestRegistryWithDefaultRegistry::test_wraps", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00001", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00002", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00003", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00004", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00005", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00006", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00007", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00008", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00009", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00010", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00011", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00012", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00013", 
"pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00014", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00015", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00016", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00017", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00018", "pint/testsuite/test_unit.py::TestConvertWithOffset::test_to_and_from_offset_units_00019" ]
[]
BSD
2,797
[ "docs/wrapping.rst", "pint/quantity.py", "pint/registry_helpers.py" ]
[ "docs/wrapping.rst", "pint/quantity.py", "pint/registry_helpers.py" ]
PlasmaPy__PlasmaPy-517
0a5eb2eb13d06b2cc9b758a82bcf888197c105af
2018-07-20 03:28:18
24113f1659d809930288374f6b1f95dc573aff47
diff --git a/plasmapy/atomic/particle_class.py b/plasmapy/atomic/particle_class.py index b7e7ed5c..1a73b294 100644 --- a/plasmapy/atomic/particle_class.py +++ b/plasmapy/atomic/particle_class.py @@ -113,9 +113,10 @@ class Particle: Parameters ---------- - argument : `str` or `int` - A string representing a particle, element, isotope, or ion; or - an integer representing the atomic number of an element. + argument : `str`, `int`, or `~plasmapy.atomic.Particle` + A string representing a particle, element, isotope, or ion; an + integer representing the atomic number of an element; or a + `Particle` instance. mass_numb : `int`, optional The mass number of an isotope or nuclide. @@ -238,6 +239,25 @@ class Particle: >>> ~positron Particle("e-") + A `~plasmapy.atomic.Particle` instance may be used as the first + argument to `~plasmapy.atomic.Particle`. + + >>> iron = Particle('Fe') + >>> iron == Particle(iron) + True + >>> Particle(iron, mass_numb=56, Z=6) + Particle("Fe-56 6+") + + If the previously constructed `~plasmapy.atomic.Particle` instance + represents an element, then the `Z` and `mass_numb` arguments may be + used to specify an ion or isotope. + + >>> iron = Particle('Fe') + >>> Particle(iron, Z=1) + Particle("Fe 1+") + >>> Particle(iron, mass_numb=56) + Particle("Fe-56") + The `~plasmapy.atomic.particle_class.Particle.categories` attribute and `~plasmapy.atomic.particle_class.Particle.is_category` method may be used to find and test particle membership in categories. @@ -257,14 +277,22 @@ class Particle: def __init__(self, argument: Union[str, int], mass_numb: int = None, Z: int = None): """ - Initialize a `~plasmapy.atomic.Particle` object and set private + Instantiate a `~plasmapy.atomic.Particle` object and set private attributes. 
""" - if not isinstance(argument, (int, str)): + if not isinstance(argument, (int, np.integer, str, Particle)): raise TypeError( "The first positional argument when creating a Particle " - "object must be either an integer or string.") + "object must be either an integer, string, or another" + "Particle object.") + + # If argument is a Particle instance, then we will construct a + # new Particle instance for the same Particle (essentially a + # copy). + + if isinstance(argument, Particle): + argument = argument.particle if mass_numb is not None and not isinstance(mass_numb, int): raise TypeError("mass_numb is not an integer")
Have Particle class return corresponding Particle instance for Particle arguments I've run into a bunch of situations where I have some objects that are either `Particle` class instances or `str`/`int` representations of particles, and I want to make sure that they are all `Particle` class instances. I have usually written code like this: ```Python >>> orig_particles = ['H', Particle('He')] >>> particles = [ particle if isinstance(particle, Particle) else Particle(particle) for particle in orig_particles ] ``` This code is rather obfuscating since it checks on whether or not `particle` is a `Particle` class instance. It would be simpler to have `Particle` return the corresponding `Particle` instance which would allow code like this: ```Python >>> particles = [Particle(particle) for particle in particles] ``` @StanczakDominik suggested that this be a copy of the `Particle` instance rather than returning self, which is like how `numpy.array` behaves. A good test would be to check that `Particle(something) == Particle(Particle(something))`, e.g., something like: ```Python from plasmapy.atomic import Particle def test_particle_of_particle(particle_representation): particle = Particle(particle_representation) particle_of_particle = Particle(particle) assert particle == particle_of_particle assert particle is not particle_of_particle # to make sure it is a copy rather than self ``` Thanks! -Nick
PlasmaPy/PlasmaPy
diff --git a/plasmapy/atomic/tests/test_particle_class.py b/plasmapy/atomic/tests/test_particle_class.py index 9d3308e0..1cb727c4 100644 --- a/plasmapy/atomic/tests/test_particle_class.py +++ b/plasmapy/atomic/tests/test_particle_class.py @@ -377,6 +377,17 @@ 'is_category("boson", exclude="boson")': AtomicError, 'is_category(any_of="boson", exclude="boson")': AtomicError, }), + + (Particle('C'), {}, + {'particle': 'C', + }), + + (Particle('C'), {'Z': 3, 'mass_numb': 14}, + {'particle': 'C-14 3+', + 'element': 'C', + 'isotope': 'C-14', + 'ionic_symbol': 'C-14 3+', + }), ] @@ -478,6 +489,8 @@ def test_Particle_equivalent_cases(equivalent_particles): ('Fe', {}, '.spin', MissingAtomicDataError), ('nu_e', {}, '.mass', MissingAtomicDataError), ('Og', {}, '.standard_atomic_weight', MissingAtomicDataError), + (Particle('C-14'), {'mass_numb': 13}, "", InvalidParticleError), + (Particle('Au 1+'), {'Z': 2}, "", InvalidParticleError), ([], {}, "", TypeError), ] @@ -707,3 +720,25 @@ def test_antiparticle_attribute_and_operator(self, particle, opposite): (f"{repr(particle)}.antiparticle returned " f"{particle.antiparticle}, whereas ~{repr(particle)} " f"returned {~particle}.") + + [email protected]('arg', ['e-', 'D+', 'Fe 25+', 'H-', 'mu+']) +def test_particleing_a_particle(arg): + """ + Test that Particle(arg) is equal to Particle(Particle(arg)), but is + not the same object in memory. + """ + particle = Particle(arg) + + assert particle == Particle(particle), ( + f"Particle({repr(arg)}) does not equal " + f"Particle(Particle({repr(arg)}).") + + assert particle == Particle(Particle(Particle(particle))), ( + f"Particle({repr(arg)}) does not equal " + f"Particle(Particle(Particle({repr(arg)})).") + + assert particle is not Particle(particle), ( + f"Particle({repr(arg)}) is the same object in memory as " + f"Particle(Particle({repr(arg)})), when it is intended to " + f"create a new object in memory (e.g., a copy).")
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "numpy>=1.16.0 astropy", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asteval==1.0.6 astropy @ file:///croot/astropy_1697468907928/work colorama==0.4.6 contourpy==1.3.0 cycler==0.12.1 Cython==3.0.12 dill==0.3.9 exceptiongroup==1.2.2 fonttools==4.56.0 h5py==3.13.0 importlib_resources==6.5.2 iniconfig==2.1.0 kiwisolver==1.4.7 lmfit==1.3.3 matplotlib==3.9.4 mpmath==1.3.0 numpy @ file:///croot/numpy_and_numpy_base_1708638617955/work/dist/numpy-1.26.4-cp39-cp39-linux_x86_64.whl#sha256=b69ac3eb7538c5a224df429dc49031914fb977825ee007f2c77a13f7aa6cd769 packaging @ file:///croot/packaging_1734472117206/work pillow==11.1.0 -e git+https://github.com/PlasmaPy/PlasmaPy.git@0a5eb2eb13d06b2cc9b758a82bcf888197c105af#egg=plasmapy pluggy==1.5.0 pyerfa @ file:///croot/pyerfa_1738082786199/work pyparsing==3.2.3 pytest==8.3.5 python-dateutil==2.9.0.post0 PyYAML @ file:///croot/pyyaml_1728657952215/work roman==5.0 scipy==1.13.1 six==1.17.0 tomli==2.2.1 uncertainties==3.2.2 zipp==3.21.0
name: PlasmaPy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - astropy=5.3.4=py39ha9d4c09_0 - blas=1.0=openblas - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgfortran-ng=11.2.0=h00389a5_1 - libgfortran5=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libopenblas=0.3.21=h043d6bf_0 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - numpy=1.26.4=py39heeff2f4_0 - numpy-base=1.26.4=py39h8a23956_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pyerfa=2.0.1.5=py39h5eee18b_0 - python=3.9.21=he870216_1 - pyyaml=6.0.2=py39h5eee18b_0 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - yaml=0.2.5=h7b6447c_0 - zlib=1.2.13=h5eee18b_1 - pip: - asteval==1.0.6 - colorama==0.4.6 - contourpy==1.3.0 - cycler==0.12.1 - cython==3.0.12 - dill==0.3.9 - exceptiongroup==1.2.2 - fonttools==4.56.0 - h5py==3.13.0 - importlib-resources==6.5.2 - iniconfig==2.1.0 - kiwisolver==1.4.7 - lmfit==1.3.3 - matplotlib==3.9.4 - mpmath==1.3.0 - pillow==11.1.0 - pluggy==1.5.0 - pyparsing==3.2.3 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - roman==5.0 - scipy==1.13.1 - six==1.17.0 - tomli==2.2.1 - uncertainties==3.2.2 - zipp==3.21.0 prefix: /opt/conda/envs/PlasmaPy
[ "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Particle(\"C\")-kwargs16-expected_dict16]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Particle(\"C\")-kwargs17-expected_dict17]", "plasmapy/atomic/tests/test_particle_class.py::test_particleing_a_particle[e-]", "plasmapy/atomic/tests/test_particle_class.py::test_particleing_a_particle[D+]", "plasmapy/atomic/tests/test_particle_class.py::test_particleing_a_particle[Fe", "plasmapy/atomic/tests/test_particle_class.py::test_particleing_a_particle[H-]", "plasmapy/atomic/tests/test_particle_class.py::test_particleing_a_particle[mu+]" ]
[ "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Li-kwargs12-expected_dict12]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[a-kwargs0--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[d+-kwargs1--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[H-kwargs2--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-818-kwargs3--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-12-kwargs4--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-kwargs5--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Au-kwargs6--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs7--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs8-.atomic_number-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[alpha-kwargs9-.standard_atomic_weight-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Fe-56-kwargs10-.standard_atomic_weight-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[e--kwargs11-.standard_atomic_weight-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[tau--kwargs12-.element_name-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[tau+-kwargs13-.atomic_number-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[neutron-kwargs14-.atomic_number-InvalidElementError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[H-kwargs15-.mass_number-InvalidIsotopeError]", 
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[neutron-kwargs16-.mass_number-InvalidIsotopeError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[He-kwargs17-.charge-ChargeError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[He-kwargs18-.integer_charge-ChargeError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Fe-kwargs19-.spin-MissingAtomicDataError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[nu_e-kwargs20-.mass-MissingAtomicDataError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Og-kwargs21-.standard_atomic_weight-MissingAtomicDataError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Particle(\"C-14\")-kwargs22--InvalidParticleError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[Particle(\"Au", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_errors[arg24-kwargs24--TypeError]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[H-----kwargs0--AtomicWarning]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[alpha-kwargs1--AtomicWarning]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_warnings[alpha-kwargs2--AtomicWarning]" ]
[ "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[neutron-kwargs0-expected_dict0]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[p+-kwargs1-expected_dict1]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[p--kwargs2-expected_dict2]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[e--kwargs3-expected_dict3]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[e+-kwargs4-expected_dict4]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[H-kwargs5-expected_dict5]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[H", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[H-1", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[D+-kwargs8-expected_dict8]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[tritium-kwargs9-expected_dict9]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Fe-kwargs10-expected_dict10]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[alpha-kwargs11-expected_dict11]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[Cn-276-kwargs13-expected_dict13]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[muon-kwargs14-expected_dict14]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_class[nu_tau-kwargs15-expected_dict15]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles0]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles1]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles2]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles3]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles4]", 
"plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles5]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles6]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles7]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles8]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles9]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_equivalent_cases[equivalent_particles10]", "plasmapy/atomic/tests/test_particle_class.py::test_Particle_cmp", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[n-neutron]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[p+-proton]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[H-1-p+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[H-1", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[D-D+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[T-T+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[He-4-alpha]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_class_mass_nuclide_mass[Fe-56-Fe-56", "plasmapy/atomic/tests/test_particle_class.py::test_particle_half_life_string", "plasmapy/atomic/tests/test_particle_class.py::test_particle_is_electron[Particle(\"e-\")-True]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_is_electron[Particle(\"p+\")-False]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_bool_error", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[p+-p-]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[n-antineutron]", 
"plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[e--e+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[mu--mu+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[tau--tau+]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_e-anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_mu-anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::test_particle_inversion[nu_tau-anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[p+-p-]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[n-antineutron]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[e--e+]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[mu--mu+]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[tau--tau+]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_e-anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_mu-anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::test_antiparticle_inversion[nu_tau-anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::test_unary_operator_for_elements", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[mu+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[mu-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[n]", 
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[tau-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[p-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[antineutron]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[e-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[p+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[tau+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[e+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_inverted_inversion[nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[mu+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[mu-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[n]", 
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[tau-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[p-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[antineutron]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[e-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[p+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[tau+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[e+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_opposite_charge[nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[mu+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[mu-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[n]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[tau-]", 
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[p-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[antineutron]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[e-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[p+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[tau+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[e+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_equal_mass[nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[mu+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[mu-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[n]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[tau-]", 
"plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[p-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[antineutron]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[e-]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[p+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[tau+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[e+]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_tau]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_e]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[anti_nu_mu]", "plasmapy/atomic/tests/test_particle_class.py::Test_antiparticle_properties_inversion::test_antiparticle_attribute_and_operator[nu_mu]" ]
[]
BSD 3-Clause "New" or "Revised" License
2,798
[ "plasmapy/atomic/particle_class.py" ]
[ "plasmapy/atomic/particle_class.py" ]
QualiSystems__cloudshell-networking-juniper-48
118ac36e83190764bb65ded2c431aa129451687c
2018-07-20 15:20:13
118ac36e83190764bb65ded2c431aa129451687c
diff --git a/.gitignore b/.gitignore index 4d86431..a1ea7c8 100644 --- a/.gitignore +++ b/.gitignore @@ -63,3 +63,5 @@ target/ .pypirc .DS_Store + +.idea/ diff --git a/cloudshell/networking/juniper/autoload/juniper_snmp_autoload.py b/cloudshell/networking/juniper/autoload/juniper_snmp_autoload.py index aea017e..30f6513 100755 --- a/cloudshell/networking/juniper/autoload/juniper_snmp_autoload.py +++ b/cloudshell/networking/juniper/autoload/juniper_snmp_autoload.py @@ -294,7 +294,7 @@ class JuniperSnmpAutoload(object): model = '' os_version = '' sys_obj_id = self.snmp_handler.get_property('SNMPv2-MIB', 'sysObjectID', 0) - model_search = re.search('^(?P<vendor>\w+)-\S+jnxProductName(?P<model>\S+)', sys_obj_id) + model_search = re.search('^(?P<vendor>\w+)-\S+jnxProduct(?:Name)?(?P<model>\S+)', sys_obj_id) if model_search: vendor = model_search.groupdict()['vendor'].capitalize() model = model_search.groupdict()['model'] diff --git a/version.txt b/version.txt index 2d2d681..30b26df 100644 --- a/version.txt +++ b/version.txt @@ -1,1 +1,1 @@ -4.0.10 +4.0.11
Vendor and Model attributes sometimes empty In some cases, vendor and Model attributes are empty during the autoload. For example when sysObjectID = jnxProductQFX520032C32Q.
QualiSystems/cloudshell-networking-juniper
diff --git a/tests/networking/juniper/autoload/test_juniper_snmp_autoload.py b/tests/networking/juniper/autoload/test_juniper_snmp_autoload.py index e80ff56..9d4baa1 100644 --- a/tests/networking/juniper/autoload/test_juniper_snmp_autoload.py +++ b/tests/networking/juniper/autoload/test_juniper_snmp_autoload.py @@ -159,6 +159,34 @@ class TestJuniperSnmpAutoload(TestCase): call('SNMPv2-MIB', 'sysLocation', '0')] self._snmp_handler.get_property.assert_has_calls(calls) + def test_build_root2(self): + vendor = 'Test_Vendor' + model = 'Tets_Model' + version = '12.1R6.5' + contact_name = Mock() + system_name = Mock() + location = Mock() + self._snmp_handler.get_property.side_effect = [ + "{0}-testjnxProduct{1}".format(vendor, model), + "TEst JUNOS {} #/test".format(version), + contact_name, + system_name, + location + ] + + self._autoload_operations_instance._build_root() + + self.assertIs(self._autoload_operations_instance.resource.contact_name, contact_name) + self.assertIs(self._autoload_operations_instance.resource.system_name, system_name) + self.assertIs(self._autoload_operations_instance.resource.location, location) + self.assertEqual(self._resource.os_version, version) + self.assertEqual(self._resource.vendor, vendor.capitalize()) + self.assertEqual(self._resource.model, model) + calls = [call('SNMPv2-MIB', 'sysObjectID', 0), call('SNMPv2-MIB', 'sysDescr', '0'), + call('SNMPv2-MIB', 'sysContact', '0'), call('SNMPv2-MIB', 'sysName', '0'), + call('SNMPv2-MIB', 'sysLocation', '0')] + self._snmp_handler.get_property.assert_has_calls(calls) + def test_get_content_indexes(self): index1 = 1 index2 = 2
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 3 }
4.0
{ "env_vars": null, "env_yml_path": [ "pytest-asyncio-config.yml" ], "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio", "mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cloudshell-automation-api==7.0.0.132 cloudshell-cli==3.1.2 cloudshell-core==2.2.180 cloudshell-networking==5.0.0 cloudshell-networking-devices==2.0.8 -e git+https://github.com/QualiSystems/cloudshell-networking-juniper.git@118ac36e83190764bb65ded2c431aa129451687c#egg=cloudshell_networking_juniper cloudshell-shell-core==3.1.240 cloudshell-snmp==3.0.136 coverage==7.8.0 ecdsa==0.19.1 exceptiongroup==1.2.2 execnet==2.1.1 iniconfig==2.1.0 ipcalc==1.1.3 jsonpickle==0.9.3 mock==5.2.0 packaging==24.2 paramiko==1.15.2 pluggy==1.5.0 ply==3.8 pyasn1==0.1.9 pycrypto==2.6.1 pysmi==0.0.6 pysnmp==4.3.1 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 scpclient==0.7 six==1.17.0 tomli==2.2.1 typing_extensions==4.13.0
name: cloudshell-networking-juniper channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cloudshell-automation-api==7.0.0.132 - cloudshell-cli==3.1.2 - cloudshell-core==2.2.180 - cloudshell-networking==5.0.0 - cloudshell-networking-devices==2.0.8 - cloudshell-shell-core==3.1.240 - cloudshell-snmp==3.0.136 - coverage==7.8.0 - ecdsa==0.19.1 - exceptiongroup==1.2.2 - execnet==2.1.1 - iniconfig==2.1.0 - ipcalc==1.1.3 - jsonpickle==0.9.3 - mock==5.2.0 - packaging==24.2 - paramiko==1.15.2 - pluggy==1.5.0 - ply==3.8 - pyasn1==0.1.9 - pycrypto==2.6.1 - pysmi==0.0.6 - pysnmp==4.3.1 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - scpclient==0.7 - six==1.17.0 - tomli==2.2.1 - typing-extensions==4.13.0 prefix: /opt/conda/envs/cloudshell-networking-juniper
[ "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_build_root2" ]
[ "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_generic_logical_ports_by_name_prop", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_generic_physical_ports_by_name_prop", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_get_content_indexes" ]
[ "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_build_root", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_content_indexes_prop", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_discover", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_if_indexes", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_init", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_initialize_snmp_handler", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_ipv4_table_prop", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_ipv6_table_prop", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_lldp_keys_prop", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_logger_property", "tests/networking/juniper/autoload/test_juniper_snmp_autoload.py::TestJuniperSnmpAutoload::test_snm_handler_property" ]
[]
null
2,800
[ ".gitignore", "version.txt", "cloudshell/networking/juniper/autoload/juniper_snmp_autoload.py" ]
[ ".gitignore", "version.txt", "cloudshell/networking/juniper/autoload/juniper_snmp_autoload.py" ]
pennmem__cmlreaders-135
23c4206adaf18b6c4f528543f9b4eded01a19c54
2018-07-20 18:32:00
355bb312d51b4429738ea491b7cfea4d2fec490c
diff --git a/cmlreaders/base_reader.py b/cmlreaders/base_reader.py index 8ac7906..e1bc92b 100644 --- a/cmlreaders/base_reader.py +++ b/cmlreaders/base_reader.py @@ -74,16 +74,6 @@ class BaseCMLReader(object, metaclass=_MetaReader): if self._file_path is None: self._file_path = file_path - # When no file path is given, look it up using PathFinder unless we're - # loading EEG data. EEG data is treated differently because of the way - # it is stored on rhino: sometimes it is split into one file per channel - # and other times it is a single HDF5 or EDF/BDF file. - if self._file_path is None and data_type != 'eeg': - finder = PathFinder(subject=subject, experiment=experiment, - session=session, localization=localization, - montage=montage, rootdir=rootdir) - self._file_path = finder.find(data_type) - self.subject = subject self.experiment = experiment self.session = session @@ -96,6 +86,21 @@ class BaseCMLReader(object, metaclass=_MetaReader): def protocol(self): return get_protocol(self.subject) + @property + def file_path(self): + """ + When no file path is given, look it up using PathFinder unless we're + loading EEG data. EEG data is treated differently because of the way + it is stored on rhino: sometimes it is split into one file per channel + and other times it is a single HDF5 or EDF/BDF file. 
+ """ + if self._file_path is None and self.data_type != 'eeg': + finder = PathFinder(subject=self.subject, experiment=self.experiment, + session=self.session, localization=self.localization, + montage=self.montage, rootdir=self.rootdir) + self._file_path = finder.find(self.data_type) + return self._file_path + @classmethod def fromfile(cls, path: Union[str, Path], subject: Optional[str] = None, diff --git a/cmlreaders/readers/eeg.py b/cmlreaders/readers/eeg.py index 5a7247c..616b8e4 100644 --- a/cmlreaders/readers/eeg.py +++ b/cmlreaders/readers/eeg.py @@ -38,9 +38,9 @@ class EEGMetaReader(BaseCMLReader): def _read_sources_json(self) -> dict: """Read from a sources.json file.""" - with open(self._file_path, 'r') as metafile: + with open(self.file_path, 'r') as metafile: sources_info = list(json.load(metafile).values())[0] - sources_info['path'] = self._file_path + sources_info['path'] = self.file_path return sources_info def _read_params_txt(self) -> dict: @@ -48,13 +48,13 @@ class EEGMetaReader(BaseCMLReader): sources.json. 
""" - df = pd.read_table(self._file_path, sep=' ', header=None, index_col=0).T + df = pd.read_table(self.file_path, sep=' ', header=None, index_col=0).T sources_info = { "sample_rate": float(df["samplerate"].iloc[0]), "data_format": df["dataformat"].str.replace("'", "").iloc[0], "n_samples": None, - "path": self._file_path, + "path": self.file_path, } return sources_info diff --git a/cmlreaders/readers/electrodes.py b/cmlreaders/readers/electrodes.py index 4ec2646..15276a4 100644 --- a/cmlreaders/readers/electrodes.py +++ b/cmlreaders/readers/electrodes.py @@ -74,13 +74,13 @@ class MontageReader(BaseCMLReader): return df def as_dataframe(self): - with open(self._file_path) as f: + with open(self.file_path) as f: raw = json.load(f) # we're using fromfile, so we need to infer subject/data_type if not len(self.data_type): self.data_type = ( - "contacts" if "contacts" in os.path.basename(self._file_path) + "contacts" if "contacts" in os.path.basename(self.file_path) else "pairs" ) @@ -142,7 +142,7 @@ class LocalizationReader(BaseCMLReader): def as_dataframe(self): import itertools - with open(self._file_path) as f: + with open(self.file_path) as f: data = json.load(f) leads = list(data['leads'].values()) @@ -206,7 +206,7 @@ class ElectrodeCategoriesReader(BaseCMLReader): 'bad electrodes', 'bad electrodes:', 'broken leads', 'broken leads:' } - with open(self._file_path, 'r') as f: + with open(self.file_path, 'r') as f: ch_info = f.read().split('\n') # This will be used to initalize a before after kind of check to sort diff --git a/cmlreaders/readers/readers.py b/cmlreaders/readers/readers.py index ceb8eb9..1ace5ff 100644 --- a/cmlreaders/readers/readers.py +++ b/cmlreaders/readers/readers.py @@ -39,14 +39,14 @@ class TextReader(BaseCMLReader): sep = " " else: sep = "," # read_csv's default value - df = pd.read_csv(self._file_path, sep=sep, names=self._headers) + df = pd.read_csv(self.file_path, sep=sep, names=self._headers) return df class BaseCSVReader(BaseCMLReader): 
"""Base class for reading CSV files.""" def as_dataframe(self): - df = pd.read_csv(self._file_path) + df = pd.read_csv(self.file_path) return df @@ -86,7 +86,7 @@ class RamulatorEventLogReader(BaseCMLReader): rootdir=rootdir) def as_dataframe(self): - with open(self._file_path, 'r') as efile: + with open(self.file_path, 'r') as efile: raw = json.loads(efile.read())['events'] exclude = ['to_id', 'from_id', 'event_id', 'command_id'] @@ -94,7 +94,7 @@ class RamulatorEventLogReader(BaseCMLReader): return df.drop(exclude, axis=1) def as_dict(self): - with open(self._file_path, 'r') as efile: + with open(self.file_path, 'r') as efile: raw_dict = json.load(efile) return raw_dict @@ -108,7 +108,7 @@ class BaseJSONReader(BaseCMLReader): data_types = [] def as_dataframe(self): - return pd.read_json(self._file_path) + return pd.read_json(self.file_path) class EventReader(BaseCMLReader): @@ -122,10 +122,10 @@ class EventReader(BaseCMLReader): ] def _read_json_events(self) -> pd.DataFrame: - return pd.read_json(self._file_path) + return pd.read_json(self.file_path) def _read_matlab_events(self) -> pd.DataFrame: - df = pd.DataFrame(sio.loadmat(self._file_path, squeeze_me=True)["events"]) + df = pd.DataFrame(sio.loadmat(self.file_path, squeeze_me=True)["events"]) if self.session is not None: df = df[df["session"] == self.session] @@ -133,7 +133,7 @@ class EventReader(BaseCMLReader): return df def as_dataframe(self): - if self._file_path.endswith(".json"): + if self.file_path.endswith(".json"): df = self._read_json_events() else: df = self._read_matlab_events() @@ -175,7 +175,7 @@ class ClassifierContainerReader(BaseCMLReader): def as_pyobject(self): summary_obj = self.pyclass_mapping['classifier'] - return summary_obj.load(self._file_path) + return summary_obj.load(self.file_path) def as_dataframe(self): raise UnsupportedRepresentation("Unable to represent classifier as a dataframe") diff --git a/cmlreaders/readers/reports.py b/cmlreaders/readers/reports.py index ecb92fd..e90e3c2 
100644 --- a/cmlreaders/readers/reports.py +++ b/cmlreaders/readers/reports.py @@ -40,7 +40,7 @@ class BaseRAMReportDataReader(BaseCMLReader): def as_pyobject(self): """Return data as a python object specific to this data type.""" if self.data_type in self.pyclass_mapping: - return self.pyclass_mapping[self.data_type].from_hdf(self._file_path) + return self.pyclass_mapping[self.data_type].from_hdf(self.file_path) def as_dataframe(self): raise exc.UnsupportedRepresentation("Unable to represent this data as a dataframe") @@ -109,7 +109,7 @@ class RAMReportSummaryDataReader(BaseRAMReportDataReader): summary_obj = self.pyclass_mapping['fr_stim_summary'] - return summary_obj.from_hdf(self._file_path) + return summary_obj.from_hdf(self.file_path) def as_dataframe(self): pyobj = self.as_pyobject()
Defer FileNotFoundError until calling the load method The `BaseCMLReader` immediately tries to find files in `__init__`: https://github.com/pennmem/cmlreaders/blob/9423bd938b6d9e49395b6b175f1b42311e5c5bd1/cmlreaders/base_reader.py#L60-L76 This leads to some awkward exception handling logic if you want to optionally load something because you have to put the `try...except` around the creation of a reader object. It would be far more natural to do this around `reader.load`. An example of what I mean follows. What you have to do now is: ```python try: category_reader = ElectrodeCategoriesReader( data_type="electrode_categories", subject=self.subject, experiment=self.experiment, session=self.session, localization=self.localization, montage=self.montage, rootdir=self.rootdir, ) except FileNotFoundError: print("oops") categories = category_reader.load() ``` Ideally, we would instead do: ```python category_reader = ElectrodeCategoriesReader( data_type="electrode_categories", subject=self.subject, experiment=self.experiment, session=self.session, localization=self.localization, montage=self.montage, rootdir=self.rootdir, ) try: categories = category_reader.load() except FileNotFoundError: print("oops") ```
pennmem/cmlreaders
diff --git a/cmlreaders/test/test_readers.py b/cmlreaders/test/test_readers.py index 97e7520..e95211d 100644 --- a/cmlreaders/test/test_readers.py +++ b/cmlreaders/test/test_readers.py @@ -83,6 +83,15 @@ class TestTextReader: reread_data = re_reader.as_dataframe() assert reread_data is not None + def test_failures(self): + """ + When unable to locate a path, constructor should pass but `load()` + should fail. + """ + reader = TextReader('jacksheet', subject='R1XXX', localization=0) + with pytest.raises(FileNotFoundError): + reader.load() + class TestRAMCSVReader: @pytest.mark.parametrize("method", ["dataframe", "recarray", "dict"])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 5 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 cached-property==1.5.2 certifi==2021.5.30 charset-normalizer==2.0.12 -e git+https://github.com/pennmem/cmlreaders.git@23c4206adaf18b6c4f528543f9b4eded01a19c54#egg=cmlreaders codecov==2.1.13 coverage==6.2 cycler==0.11.0 decorator==5.1.1 defusedxml==0.7.1 docutils==0.18.1 entrypoints==0.4 flake8==3.9.2 h5py==3.1.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 ipython-genutils==0.2.0 Jinja2==3.0.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mccabe==0.6.1 mistune==0.8.4 mne==0.23.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.8.8 nest-asyncio==1.6.0 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 Pillow==8.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.7.0 pyflakes==2.3.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 webencodings==0.5.1 zipp==3.6.0
name: cmlreaders channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cached-property==1.5.2 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - cycler==0.11.0 - decorator==5.1.1 - defusedxml==0.7.1 - docutils==0.18.1 - entrypoints==0.4 - flake8==3.9.2 - h5py==3.1.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - ipython-genutils==0.2.0 - jinja2==3.0.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mccabe==0.6.1 - mistune==0.8.4 - mne==0.23.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.8.8 - nest-asyncio==1.6.0 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - pillow==8.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.7.0 - pyflakes==2.3.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - 
sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cmlreaders
[ "cmlreaders/test/test_readers.py::TestTextReader::test_failures" ]
[ "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1405E-0-0-contacts]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1405E-0-0-pairs]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1006P-0-0-contacts]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1006P-0-0-pairs]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1006P-0-1-contacts]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1006P-0-1-pairs]", "cmlreaders/test/test_readers.py::TestElectrodeCategoriesReader::test_load[R1111M-lens0]", "cmlreaders/test/test_readers.py::TestElectrodeCategoriesReader::test_load[R1052E-lens1]", "cmlreaders/test/test_readers.py::TestClassifierContainerReader::test_as_methods[baseline_classifier-pyobject]", "cmlreaders/test/test_readers.py::TestClassifierContainerReader::test_as_methods[used_classifier-pyobject]", "cmlreaders/test/test_readers.py::TestClassifierContainerReader::test_to_methods[baseline_classifier-binary]", "cmlreaders/test/test_readers.py::TestClassifierContainerReader::test_to_methods[used_classifier-binary]" ]
[ "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-voxel_coordinates-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-voxel_coordinates-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-voxel_coordinates-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-leads-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-leads-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-leads-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-classifier_excluded_leads-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-classifier_excluded_leads-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-classifier_excluded_leads-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-good_leads-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-good_leads-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-good_leads-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-jacksheet-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-jacksheet-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-jacksheet-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-area-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-area-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-area-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_read_jacksheet", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-voxel_coordinates-json]", 
"cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-voxel_coordinates-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-leads-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-leads-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-classifier_excluded_leads-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-classifier_excluded_leads-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-good_leads-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-good_leads-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-jacksheet-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-jacksheet-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-area-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-area-csv]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-electrode_coordinates-dataframe]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-electrode_coordinates-recarray]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-electrode_coordinates-dict]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-prior_stim_results-dataframe]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-prior_stim_results-recarray]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-prior_stim_results-dict]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-target_selection_table-dataframe]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-target_selection_table-recarray]", 
"cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-target_selection_table-dict]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-electrode_coordinates-json]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-electrode_coordinates-csv]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-prior_stim_results-json]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-prior_stim_results-csv]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-target_selection_table-json]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-target_selection_table-csv]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_as_methods[R1409D-catFR1-1-event_log-dataframe]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_as_methods[R1409D-catFR1-1-event_log-recarray]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_as_methods[R1409D-catFR1-1-event_log-dict]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_to_methods[R1409D-catFR1-1-event_log-json]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_to_methods[R1409D-catFR1-1-event_log-csv]", "cmlreaders/test/test_readers.py::TestBaseJSONReader::test_load", "cmlreaders/test/test_readers.py::TestEventReader::test_load_json", "cmlreaders/test/test_readers.py::TestEventReader::test_load_matlab[all_events]", "cmlreaders/test/test_readers.py::TestEventReader::test_load_matlab[task_events]", "cmlreaders/test/test_readers.py::TestEventReader::test_load_matlab[math_events]", "cmlreaders/test/test_readers.py::TestLocalizationReader::test_load", "cmlreaders/test/test_readers.py::test_fromfile[ElectrodeCategoriesReader-/cmlreaders/cmlreaders/test/data/electrode_categories.txt-dict]", 
"cmlreaders/test/test_readers.py::test_fromfile[MontageReader-/cmlreaders/cmlreaders/test/data/pairs.json-DataFrame]", "cmlreaders/test/test_readers.py::test_fromfile[MontageReader-/cmlreaders/cmlreaders/test/data/contacts.json-DataFrame]", "cmlreaders/test/test_readers.py::test_fromfile[RamulatorEventLogReader-/cmlreaders/cmlreaders/test/data/event_log.json-DataFrame]" ]
[]
null
2,801
[ "cmlreaders/readers/electrodes.py", "cmlreaders/readers/eeg.py", "cmlreaders/readers/readers.py", "cmlreaders/base_reader.py", "cmlreaders/readers/reports.py" ]
[ "cmlreaders/readers/electrodes.py", "cmlreaders/readers/eeg.py", "cmlreaders/readers/readers.py", "cmlreaders/base_reader.py", "cmlreaders/readers/reports.py" ]
xonsh__xonsh-2738
afdc6b027cde64e3a0939e9b2da9f99c2fd2dc96
2018-07-20 23:22:10
b22ace6eea5783cc7879e5bcccdc20f3d5d1627d
diff --git a/news/firstse.rst b/news/firstse.rst new file mode 100644 index 00000000..8627a726 --- /dev/null +++ b/news/firstse.rst @@ -0,0 +1,14 @@ +**Added:** None + +**Changed:** None + +**Deprecated:** None + +**Removed:** None + +**Fixed:** + +* Fixed issue with ``SyntaxErrors`` being reported on the wrong line + when a block of code contained multiple implicit subprocesses. + +**Security:** None diff --git a/xonsh/execer.py b/xonsh/execer.py index 2fbaf2c8..f9775a5d 100644 --- a/xonsh/execer.py +++ b/xonsh/execer.py @@ -177,6 +177,8 @@ class Execer(object): last_error_col in (e.loc.column + 1, e.loc.column)): raise original_error from None + elif last_error_line != e.loc.lineno: + original_error = e last_error_col = e.loc.column last_error_line = e.loc.lineno idx = last_error_line - 1
Incorrect line numbers given in traceback when running a xonsh script Suppose you have a xonsh script with a syntax error in it. A traceback will be generated properly, but it will sometimes incorrectly point to the wrong line in the file if preceded by lines in subprocess mode. The problem seems to be that blank lines and comments are ignored. For example: ``` $ cat test.xsh #!/usr/bin/env xonsh echo "This is line 3" print ("This is line 4") x = "This is a string where I forget the closing quote on line 5 echo "This is line 6" $ ./test.xsh Traceback (most recent call last): [trimmed traceback] SyntaxError: ./test.xsh:3:5: ('code: "This is line 3"',) echo "This is line 3" ^ ``` It says the error is on line 3, character 5, but it's really on line 5. However, suppose that first `echo` call is changed to another print call. Then we get the correct line: ``` $ cat test.xsh #!/usr/bin/env xonsh print("This is line 3") print ("This is line 4") x = "This is a string where I forget the closing quote on line 5 echo "This is line 6" $ ./test.xsh Traceback (most recent call last): [trimmed traceback] SyntaxError: ./test.xsh:5:4: " x = "This is a string where I forget the closing quote on line 5 ^ ``` Lastly, suppose we replace the `print` call on line 4 with a call in subprocess mode to `echo`. 
Then the error is now on line 4: ``` $ cat test.xsh #!/usr/bin/env xonsh print("This is line 3") echo "This is line 4" x = "This is a string where I forget the closing quote on line 5 echo "This is line 6" $ ./test.xsh Traceback (most recent call last): [trimmed traceback] SyntaxError: ./test.xsh:4:5: ('code: "This is line 4"',) echo "This is line 4" ^ ``` For reference, here is the entire traceback, which (besides the last part with the line) is identical in all three runs: ``` $ ./test.xsh Traceback (most recent call last): File "/usr/local/bin/xonsh", line 3, in <module> main() File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 20837, in main _failback_to_other_shells(args, err) File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 20801, in _failback_to_other_shells raise err File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 20835, in main return main_xonsh(args) File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 20876, in main_xonsh loc=None, mode='exec') File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 2438, in run_script_with_cache ccode = compile_code(filename, code, execer, glb, loc, mode) File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 2397, in compile_code filename=filename) File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 20054, in compile transform=transform) File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 20023, in parse tree, input = self._parse_ctx_free(input, mode=mode, filename=filename) File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 20123, in _parse_ctx_free raise original_error from None File "/usr/local/lib/python3.5/dist-packages/xonsh/__amalgam__.py", line 20110, in _parse_ctx_free debug_level=(self.debug_level > 2)) File "/usr/local/lib/python3.5/dist-packages/xonsh/parsers/base.py", line 349, in parse tree = self.parser.parse(input=s, 
lexer=self.lexer, debug=debug_level) File "/usr/local/lib/python3.5/dist-packages/xonsh/ply/ply/yacc.py", line 331, in parse return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) File "/usr/local/lib/python3.5/dist-packages/xonsh/ply/ply/yacc.py", line 1199, in parseopt_notrack tok = call_errorfunc(self.errorfunc, errtoken, self) File "/usr/local/lib/python3.5/dist-packages/xonsh/ply/ply/yacc.py", line 193, in call_errorfunc r = errorfunc(token) File "/usr/local/lib/python3.5/dist-packages/xonsh/parsers/base.py", line 2760, in p_error column=p.lexpos)) File "/usr/local/lib/python3.5/dist-packages/xonsh/parsers/base.py", line 482, in _parse_error raise err SyntaxError: ./test.xsh:4:5: ('code: "This is line 4"',) echo "This is line 4" ^ ```
xonsh/xonsh
diff --git a/tests/test_integrations.py b/tests/test_integrations.py index 35939e51..1364a330 100644 --- a/tests/test_integrations.py +++ b/tests/test_integrations.py @@ -287,6 +287,18 @@ def test_eof_syntax_error(): assert ':2:0: EOF in multi-line statement' in err +def test_open_quote_syntax_error(): + script = ('#!/usr/bin/env xonsh\n\n' + 'echo "This is line 3"\n' + 'print ("This is line 4")\n' + 'x = "This is a string where I forget the closing quote on line 5\n' + 'echo "This is line 6"\n') + out, err, rtn = run_xonsh(script, stderr=sp.PIPE) + assert """:3:5: ('code: "This is line 3"',)""" not in err + assert ':5:4: "' in err + assert 'SyntaxError:' in err + + _bad_case = pytest.mark.skipif(ON_DARWIN or ON_WINDOWS or ON_TRAVIS, reason="bad platforms")
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-flake8", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements-docs.txt", "requirements-tests.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 Babel==2.14.0 backcall==0.2.0 certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 charset-normalizer==3.4.1 cloud-sptheme==1.10.1.post20200504175005 codecov==2.1.13 coverage==7.2.7 cryptography==44.0.2 cycler==0.11.0 debugpy==1.7.0 decorator==5.1.1 doctr==1.9.0 docutils==0.19 entrypoints==0.4 exceptiongroup==1.2.2 flake8==3.5.0 fonttools==4.38.0 idna==3.10 imagesize==1.4.1 importlib-metadata==6.7.0 iniconfig==2.0.0 ipykernel==6.16.2 ipython==7.34.0 jedi==0.19.2 Jinja2==3.1.6 jupyter_client==7.4.9 jupyter_core==4.12.0 kiwisolver==1.4.5 MarkupSafe==2.1.5 matplotlib==3.5.3 matplotlib-inline==0.1.6 mccabe==0.6.1 nest-asyncio==1.6.0 numpy==1.21.6 numpydoc==1.5.0 packaging==24.0 parso==0.8.4 pexpect==4.9.0 pickleshare==0.7.5 Pillow==9.5.0 pluggy==1.2.0 ply==3.11 prompt_toolkit==3.0.48 psutil==7.0.0 ptyprocess==0.7.0 py==1.11.0 pycodestyle==2.3.1 pycparser==2.21 pyflakes==1.6.0 Pygments==2.17.2 pyparsing==3.1.4 pytest==7.4.4 pytest-cov==4.1.0 pytest-flake8==1.1.0 pytest-timeout==2.3.1 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==26.2.1 requests==2.31.0 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==2.0.1 tornado==6.2 traitlets==5.9.0 typing_extensions==4.7.1 urllib3==2.0.7 wcwidth==0.2.13 -e git+https://github.com/xonsh/xonsh.git@afdc6b027cde64e3a0939e9b2da9f99c2fd2dc96#egg=xonsh zipp==3.15.0
name: xonsh channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - babel==2.14.0 - backcall==0.2.0 - cffi==1.15.1 - charset-normalizer==3.4.1 - cloud-sptheme==1.10.1.post20200504175005 - codecov==2.1.13 - coverage==7.2.7 - cryptography==44.0.2 - cycler==0.11.0 - debugpy==1.7.0 - decorator==5.1.1 - doctr==1.9.0 - docutils==0.19 - entrypoints==0.4 - exceptiongroup==1.2.2 - flake8==3.5.0 - fonttools==4.38.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - ipykernel==6.16.2 - ipython==7.34.0 - jedi==0.19.2 - jinja2==3.1.6 - jupyter-client==7.4.9 - jupyter-core==4.12.0 - kiwisolver==1.4.5 - markupsafe==2.1.5 - matplotlib==3.5.3 - matplotlib-inline==0.1.6 - mccabe==0.6.1 - nest-asyncio==1.6.0 - numpy==1.21.6 - numpydoc==1.5.0 - packaging==24.0 - parso==0.8.4 - pexpect==4.9.0 - pickleshare==0.7.5 - pillow==9.5.0 - pluggy==1.2.0 - ply==3.11 - prompt-toolkit==3.0.48 - psutil==7.0.0 - ptyprocess==0.7.0 - py==1.11.0 - pycodestyle==2.3.1 - pycparser==2.21 - pyflakes==1.6.0 - pygments==2.17.2 - pyparsing==3.1.4 - pytest==7.4.4 - pytest-cov==4.1.0 - pytest-flake8==1.1.0 - pytest-timeout==2.3.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==26.2.1 - requests==2.31.0 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - 
sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==2.0.1 - tornado==6.2 - traitlets==5.9.0 - typing-extensions==4.7.1 - urllib3==2.0.7 - wcwidth==0.2.13 - zipp==3.15.0 prefix: /opt/conda/envs/xonsh
[ "tests/test_integrations.py::test_open_quote_syntax_error" ]
[ "tests/test_integrations.py::test_printfile", "tests/test_integrations.py::test_printname", "tests/test_integrations.py::test_sourcefile" ]
[ "tests/test_integrations.py::test_script[case0]", "tests/test_integrations.py::test_script[case1]", "tests/test_integrations.py::test_script[case2]", "tests/test_integrations.py::test_script[case3]", "tests/test_integrations.py::test_script[case4]", "tests/test_integrations.py::test_script[case5]", "tests/test_integrations.py::test_script[case6]", "tests/test_integrations.py::test_script[case7]", "tests/test_integrations.py::test_script[case8]", "tests/test_integrations.py::test_script[case9]", "tests/test_integrations.py::test_script[case10]", "tests/test_integrations.py::test_script[case11]", "tests/test_integrations.py::test_script[case12]", "tests/test_integrations.py::test_script[case13]", "tests/test_integrations.py::test_script[case14]", "tests/test_integrations.py::test_script_stderr[case0]", "tests/test_integrations.py::test_single_command_no_windows[pwd-None-<lambda>]", "tests/test_integrations.py::test_single_command_no_windows[echo", "tests/test_integrations.py::test_single_command_no_windows[ls", "tests/test_integrations.py::test_eof_syntax_error", "tests/test_integrations.py::test_subshells[\\nwith", "tests/test_integrations.py::test_redirect_out_to_file[pwd-<lambda>]" ]
[]
BSD License
2,802
[ "news/firstse.rst", "xonsh/execer.py" ]
[ "news/firstse.rst", "xonsh/execer.py" ]
pybel__pybel-315
dca028340c84849e7b7bf6999b84b093dd52edf0
2018-07-21 11:36:33
6b0eb5dcb19400f3a64ac4830747bfe8dcbe8141
cthoyt: Before merging this, remove the whole query folder and start a new PR codecov[bot]: # [Codecov](https://codecov.io/gh/pybel/pybel/pull/315?src=pr&el=h1) Report > :exclamation: No coverage uploaded for pull request base (`develop@dca0283`). [Click here to learn what that means](https://docs.codecov.io/docs/error-reference#section-missing-base-commit). > The diff coverage is `26.87%`. [![Impacted file tree graph](https://codecov.io/gh/pybel/pybel/pull/315/graphs/tree.svg?token=J7joRTRygG&src=pr&height=150&width=650)](https://codecov.io/gh/pybel/pybel/pull/315?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## develop #315 +/- ## ========================================== Coverage ? 84.76% ========================================== Files ? 129 Lines ? 6775 Branches ? 1092 ========================================== Hits ? 5743 Misses ? 841 Partials ? 191 ``` | [Impacted Files](https://codecov.io/gh/pybel/pybel/pull/315?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/pybel/struct/query/constants.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9xdWVyeS9jb25zdGFudHMucHk=) | `0% <0%> (ø)` | | | [src/pybel/struct/query/query.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9xdWVyeS9xdWVyeS5weQ==) | `0% <0%> (ø)` | | | [src/pybel/struct/query/exc.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9xdWVyeS9leGMucHk=) | `0% <0%> (ø)` | | | [src/pybel/struct/query/random\_subgraph.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9xdWVyeS9yYW5kb21fc3ViZ3JhcGgucHk=) | `0% <0%> (ø)` | | | [src/pybel/struct/query/selection.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9xdWVyeS9zZWxlY3Rpb24ucHk=) | `0% <0%> (ø)` | | | 
[src/pybel/struct/mutation/\_\_init\_\_.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9tdXRhdGlvbi9fX2luaXRfXy5weQ==) | `100% <100%> (ø)` | | | [src/pybel/struct/mutation/induction/citation.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9tdXRhdGlvbi9pbmR1Y3Rpb24vY2l0YXRpb24ucHk=) | `100% <100%> (ø)` | | | [...rc/pybel/struct/filters/edge\_predicate\_builders.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9maWx0ZXJzL2VkZ2VfcHJlZGljYXRlX2J1aWxkZXJzLnB5) | `95.23% <100%> (ø)` | | | [src/pybel/struct/mutation/induction/\_\_init\_\_.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9tdXRhdGlvbi9pbmR1Y3Rpb24vX19pbml0X18ucHk=) | `100% <100%> (ø)` | | | [...rc/pybel/struct/mutation/induction/neighborhood.py](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL3N0cnVjdC9tdXRhdGlvbi9pbmR1Y3Rpb24vbmVpZ2hib3Job29kLnB5) | `42.85% <42.85%> (ø)` | | | ... and [6 more](https://codecov.io/gh/pybel/pybel/pull/315/diff?src=pr&el=tree-more) | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/pybel/pybel/pull/315?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/pybel/pybel/pull/315?src=pr&el=footer). Last update [dca0283...80ba1a1](https://codecov.io/gh/pybel/pybel/pull/315?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/src/pybel/dsl/nodes.py b/src/pybel/dsl/nodes.py index db6df939..2494d631 100644 --- a/src/pybel/dsl/nodes.py +++ b/src/pybel/dsl/nodes.py @@ -46,30 +46,26 @@ class BaseEntity(dict): @abc.abstractmethod def as_tuple(self): - """Returns this entity as a canonical tuple + """Return this entity as a PyBEL tuple. :rtype: tuple """ @abc.abstractmethod def as_bel(self): - """Returns this entity as canonical BEL + """Return this entity as a BEL string. :rtype: tuple """ def as_sha512(self): - """Returns this entity as a hash + """Return this entity as a SHA512 hash encoded in UTF-8. :rtype: str """ return hash_node(self.as_tuple()) def __hash__(self): - """Use the tuple serialization of this node as the hash - - :rtype: int - """ return hash(self.as_tuple()) def __str__(self): @@ -77,14 +73,14 @@ class BaseEntity(dict): class BaseAbundance(BaseEntity): - """The superclass for building node data dictionaries""" + """The superclass for building node data dictionaries.""" def __init__(self, func, namespace, name=None, identifier=None): - """ + """Build an abundance from a function, namespace and a name and/or identifier. :param str func: The PyBEL function :param str namespace: The name of the namespace - :param Optional[str] name: - :param Optional[str] identifier: + :param Optional[str] name: The name of this abundance + :param Optional[str] identifier: The database identifier for this abundance """ if name is None and identifier is None: raise PyBELDSLException('Either name or identifier must be specified') @@ -94,29 +90,45 @@ class BaseAbundance(BaseEntity): @property def function(self): + """The function of this abundance. + + :rtype: str + """ return self[FUNCTION] @property def name(self): + """The name of this abundance. + + :rtype: Optional[str] + """ return self.get(NAME) @property def namespace(self): + """The namespace of this abundance. 
+ + :rtype: str + """ return self.get(NAMESPACE) @property def identifier(self): + """The identifier of this abundance. + + :rtype: Optional[str] + """ return self.get(IDENTIFIER) def as_tuple(self): - """Returns this node as a PyBEL node tuple + """Return this node as a PyBEL node tuple. :rtype: tuple """ return self[FUNCTION], self[NAMESPACE], self[NAME] def as_bel(self): - """Returns this node as BEL + """Return this node as a BEL string. :rtype: str """ @@ -128,13 +140,14 @@ class BaseAbundance(BaseEntity): class abundance(BaseAbundance): - """Builds an abundance node data dictionary""" + """Builds an abundance node data dictionary.""" def __init__(self, namespace, name=None, identifier=None): - """ + """Build a general abundance entitiy. + :param str namespace: The name of the database used to identify this entity - :param str name: The database's preferred name or label for this entity - :param str identifier: The database's identifier for this entity + :param Optional[str] name: The database's preferred name or label for this entity + :param Optional[str] identifier: The database's identifier for this entity Example: diff --git a/src/pybel/struct/filters/edge_predicate_builders.py b/src/pybel/struct/filters/edge_predicate_builders.py index 23bd9aa4..87612e94 100644 --- a/src/pybel/struct/filters/edge_predicate_builders.py +++ b/src/pybel/struct/filters/edge_predicate_builders.py @@ -1,8 +1,9 @@ # -*- coding: utf-8 -*- +from six import string_types -from .edge_predicates import edge_predicate, keep_edge_permissive -from ...constants import ANNOTATIONS, CAUSAL_RELATIONS, RELATION +from .edge_predicates import edge_predicate, has_authors, has_pubmed, keep_edge_permissive +from ...constants import ANNOTATIONS, CAUSAL_RELATIONS, CITATION, CITATION_AUTHORS, CITATION_REFERENCE, RELATION __all__ = [ 'build_annotation_dict_all_filter', @@ -10,6 +11,8 @@ __all__ = [ 'build_upstream_edge_predicate', 'build_downstream_edge_predicate', 'build_relation_predicate', + 
'build_pmid_inclusion_filter', + 'build_author_inclusion_filter', ] @@ -39,8 +42,8 @@ def _annotation_dict_all_filter(data, query): def build_annotation_dict_all_filter(annotations): - """Builds a filter that keeps edges whose data dictionaries's annotations entry are super-dictionaries to the given - dictionary. + """Build an edge predicate that passes for edges whose data dictionaries's annotations entry are super-dictionaries + to the given dictionary. If no annotations are given, will always evaluate to true. @@ -82,10 +85,9 @@ def _annotation_dict_any_filter(data, query): def build_annotation_dict_any_filter(annotations): - """Builds a filter that keeps edges whose data dictionaries's annotations entry contain any match to - the target dictionary. + """Build an edge predicate that passes for edges whose data dictionaries match the given dictionary. - If no annotations are given, will always evaluate to true. + If the given dictionary is empty, will always evaluate to true. :param dict annotations: The annotation query dict to match :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool @@ -106,34 +108,138 @@ def build_annotation_dict_any_filter(annotations): def build_upstream_edge_predicate(nodes): + """Build an edge predicate that pass for relations for which one of the given nodes is the object. + + :param iter[tuple] nodes: An iterable of PyBEL node tuples + :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool + """ + nodes = set(nodes) + def upstream_filter(graph, u, v, k): + """Pass for relations for which one of the given nodes is the object. + + :type graph: pybel.BELGraph + :type u: tuple + :type v: tuple + :type k: int + :rtype: bool + """ return v in nodes and graph[u][v][k][RELATION] in CAUSAL_RELATIONS return upstream_filter def build_downstream_edge_predicate(nodes): + """Build an edge predicate that passes for edges for which one of the given nodes is the subject. 
+ + :param iter[tuple] nodes: An iterable of PyBEL node tuples + :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool + """ + nodes = set(nodes) + def downstream_filter(graph, u, v, k): + """Pass for relations for which one of the given nodes is the subject. + + :type graph: pybel.BELGraph + :type u: tuple + :type v: tuple + :type k: int + :rtype: bool + """ return u in nodes and graph[u][v][k][RELATION] in CAUSAL_RELATIONS return downstream_filter def build_relation_predicate(relation): - """Build an edge predicate that matches edges with the given relation + """Build an edge predicate that passes for edges with the given relation. :param str relation: A relation string :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool """ - @edge_predicate - def is_relation(data): - """Only passes on associative edges + def relation_predicate(graph, u, v, k): + """Pass for relations matching the enclosed value. - :param dict data: The PyBEL edge data dictionary - :return: If the edge is a causal edge + :type graph: pybel.BELGraph + :type u: tuple + :type v: tuple + :type k: int :rtype: bool """ - return data[RELATION] == relation + return relation == graph[u][v][k][RELATION] - return is_relation + return relation_predicate + + +def build_pmid_inclusion_filter(pmids): + """Build an edge predicate that passes for edges with citations from the given PubMed identifier(s). + + :param pmids: A PubMed identifier or list of PubMed identifiers to filter for + :type pmids: str or iter[str] + :return: An edge predicate + :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool + """ + if isinstance(pmids, string_types): + @edge_predicate + def pmid_inclusion_filter(data): + """Pass for edges with PubMed citations matching the contained PubMed identifier. 
+ + :param dict data: The edge data dictionary + :return: If the edge has a PubMed citation with the contained PubMed identifier + :rtype: bool + """ + return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] == pmids + + else: + pmids = set(pmids) + + @edge_predicate + def pmid_inclusion_filter(data): + """Pass for edges with PubMed citations matching one of the contained PubMed identifiers. + + :param dict data: The edge data dictionary + :return: If the edge has a PubMed citation with one of the contained PubMed identifiers + :rtype: bool + """ + return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] in pmids + + return pmid_inclusion_filter + + +def build_author_inclusion_filter(authors): + """Build an edge predicate that passes for edges with citations written by the given author(s). + + :param authors: An author or list of authors + :type authors: str or iter[str] + :return: An edge predicate + :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool + """ + if isinstance(authors, string_types): + @edge_predicate + def author_filter(data): + """Pass for edges with citations with an author that matches the contained author. + + :param dict data: The edge data dictionary + :return: If the edge has a citation with an author that matches the the contained author + :rtype: bool + """ + return has_authors(data) and authors in data[CITATION][CITATION_AUTHORS] + + else: + authors = set(authors) + + @edge_predicate + def author_filter(data): + """Pass for edges with citations with an author that matches one or more of the contained authors. 
+ + :param dict data: The edge data dictionary + :return: If the edge has a citation with an author that matches the the contained author + :rtype: bool + """ + return has_authors(data) and any( + author in data[CITATION][CITATION_AUTHORS] + for author in authors + ) + + return author_filter diff --git a/src/pybel/struct/filters/node_predicate_builders.py b/src/pybel/struct/filters/node_predicate_builders.py index f00c624f..b6c37877 100644 --- a/src/pybel/struct/filters/node_predicate_builders.py +++ b/src/pybel/struct/filters/node_predicate_builders.py @@ -6,11 +6,14 @@ from collections import Iterable from six import string_types -from ...constants import FUNCTION +from ...constants import FUNCTION, NAME __all__ = [ 'function_inclusion_filter_builder', 'data_missing_key_builder', + 'build_node_data_search', + 'build_node_key_search', + 'build_node_name_search', ] @@ -83,3 +86,60 @@ def data_missing_key_builder(key): return key not in graph.node[node] return data_does_not_contain_key + + +def build_node_data_search(key, data_predicate): + """Pass for nodes who have the given key in their data dictionaries and whose associated values pass the given + filter function. + + :param str key: The node data dictionary key to check + :param data_predicate: The filter to apply to the node data dictionary + :type data_predicate: (Any) -> bool + :return: A node predicate + :rtype: (pybel.BELGraph, tuple) -> bool + """ + + def node_data_filter(graph, node): + """Pass if the given node has a given data annotated and passes the contained filter. 
+ + :type graph: pybel.BELGraph + :type node: tuple + :return: If the node has the contained key in its data dictionary and passes the contained filter + :rtype: bool + """ + data = graph.node[node] + return key in data and data_predicate(data[key]) + + return node_data_filter + + +def build_node_key_search(query, key): + """Build a node filter that only passes for nodes whose values for the given key are superstrings of the query + string(s). + + :param query: The query string or strings to check if they're in the node name + :type query: str or iter[str] + :param str key: The key for the node data dictionary. Should refer only to entries that have str values + :return: A node predicate + :rtype: (pybel.BELGraph, tuple) -> bool + """ + if isinstance(query, string_types): + return build_node_data_search(key, lambda s: query.lower() in s.lower()) + + if isinstance(query, Iterable): + return build_node_data_search(key, lambda s: any(q.lower() in s.lower() for q in query)) + + raise TypeError('query is wrong type: %s', query) + + +def build_node_name_search(query): + """Search nodes' names. 
+ + Is a thin wrapper around :func:`build_node_key_search` with :data:`pybel.constants.NAME` + + :param query: The query string or strings to check if they're in the node name + :type query: str or iter[str] + :return: A node predicate + :rtype: (pybel.BELGraph, tuple) -> bool + """ + return build_node_key_search(query=query, key=NAME) diff --git a/src/pybel/struct/graph.py b/src/pybel/struct/graph.py index aa627185..9ed3ea31 100644 --- a/src/pybel/struct/graph.py +++ b/src/pybel/struct/graph.py @@ -10,7 +10,16 @@ from six import string_types from .operations import left_full_join, left_node_intersection_join, left_outer_join from ..canonicalize import edge_to_bel, node_to_bel -from ..constants import * +from ..constants import ( + ANNOTATIONS, CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED, DECREASES, DESCRIPTION, + DIRECTLY_DECREASES, DIRECTLY_INCREASES, EQUIVALENT_TO, EVIDENCE, FUNCTION, GRAPH_ANNOTATION_LIST, + GRAPH_ANNOTATION_PATTERN, GRAPH_ANNOTATION_URL, GRAPH_METADATA, GRAPH_NAMESPACE_PATTERN, GRAPH_NAMESPACE_URL, + GRAPH_PYBEL_VERSION, GRAPH_UNCACHED_NAMESPACES, HASH, HAS_COMPONENT, HAS_MEMBER, HAS_PRODUCT, HAS_REACTANT, + HAS_VARIANT, IDENTIFIER, INCREASES, IS_A, MEMBERS, METADATA_AUTHORS, METADATA_CONTACT, METADATA_COPYRIGHT, + METADATA_DESCRIPTION, METADATA_DISCLAIMER, METADATA_LICENSES, METADATA_NAME, METADATA_VERSION, NAME, NAMESPACE, + OBJECT, ORTHOLOGOUS, PART_OF, PRODUCTS, REACTANTS, RELATION, SUBJECT, TRANSCRIBED_TO, TRANSLATED_TO, VARIANTS, + unqualified_edge_code, +) from ..dsl import activity from ..tokens import node_to_tuple from ..utils import get_version, hash_edge @@ -435,7 +444,7 @@ class BELGraph(networkx.MultiDiGraph): :param v: Either a PyBEL node tuple or PyBEL node data dictionary representing the target node :type v: tuple or dict """ - return self.add_unqualified_edge(u,v, PART_OF) + return self.add_unqualified_edge(u, v, PART_OF) def add_has_member(self, u, v): """Add an hasMember relationship such that u 
hasMember v. @@ -445,7 +454,7 @@ class BELGraph(networkx.MultiDiGraph): :param v: Either a PyBEL node tuple or PyBEL node data dictionary representing the target node :type v: tuple or dict """ - return self.add_unqualified_edge(u,v, HAS_MEMBER) + return self.add_unqualified_edge(u, v, HAS_MEMBER) def add_increases(self, u, v, evidence, citation, annotations=None, subject_modifier=None, object_modifier=None, **attr): @@ -470,6 +479,29 @@ class BELGraph(networkx.MultiDiGraph): annotations=annotations, subject_modifier=subject_modifier, object_modifier=object_modifier, **attr) + def add_directly_increases(self, u, v, evidence, citation, annotations=None, subject_modifier=None, + object_modifier=None, **attr): + """Wraps :meth:`add_qualified_edge` for :data:`pybel.constants.DIRECTLY_INCREASES`. + + :param tuple or dict u: Either a PyBEL node tuple or PyBEL node data dictionary representing the source node + :param tuple or dict v: Either a PyBEL node tuple or PyBEL node data dictionary representing the target node + :param str evidence: The evidence string from an article + :param dict[str,str] or str citation: The citation data dictionary for this evidence. If a string is given, + assumes it's a PubMed identifier and auto-fills the citation type. + :param annotations: The annotations data dictionary + :type annotations: Optional[dict[str,str] or dict[str,set] or dict[str,dict[str,bool]]] + :param Optional[dict] subject_modifier: The modifiers (like activity) on the subject node. See data model + documentation. + :param Optional[dict] object_modifier: The modifiers (like activity) on the object node. See data model + documentation. 
+ + :return: The hash of the edge + :rtype: str + """ + return self.add_qualified_edge(u=u, v=v, relation=DIRECTLY_INCREASES, evidence=evidence, citation=citation, + annotations=annotations, subject_modifier=subject_modifier, + object_modifier=object_modifier, **attr) + def add_decreases(self, u, v, evidence, citation, annotations=None, subject_modifier=None, object_modifier=None, **attr): """Wraps :meth:`add_qualified_edge` for :data:`pybel.constants.DECREASES`. @@ -493,6 +525,29 @@ class BELGraph(networkx.MultiDiGraph): annotations=annotations, subject_modifier=subject_modifier, object_modifier=object_modifier, **attr) + def add_directly_decreases(self, u, v, evidence, citation, annotations=None, subject_modifier=None, + object_modifier=None, **attr): + """Wraps :meth:`add_qualified_edge` for :data:`pybel.constants.DIRECTLY_DECREASES`. + + :param tuple or dict u: Either a PyBEL node tuple or PyBEL node data dictionary representing the source node + :param tuple or dict v: Either a PyBEL node tuple or PyBEL node data dictionary representing the target node + :param str evidence: The evidence string from an article + :param dict[str,str] or str citation: The citation data dictionary for this evidence. If a string is given, + assumes it's a PubMed identifier and auto-fills the citation type. + :param annotations: The annotations data dictionary + :type annotations: Optional[dict[str,str] or dict[str,set] or dict[str,dict[str,bool]]] + :param Optional[dict] subject_modifier: The modifiers (like activity) on the subject node. See data model + documentation. + :param Optional[dict] object_modifier: The modifiers (like activity) on the object node. See data model + documentation. 
+ + :return: The hash of the edge + :rtype: str + """ + return self.add_qualified_edge(u=u, v=v, relation=DIRECTLY_DECREASES, evidence=evidence, citation=citation, + annotations=annotations, subject_modifier=subject_modifier, + object_modifier=object_modifier, **attr) + def iter_node_data_pairs(self): """Iterates over pairs of nodes and their data dictionaries diff --git a/src/pybel/struct/mutation/__init__.py b/src/pybel/struct/mutation/__init__.py index 1a2ddbf5..d77319b3 100644 --- a/src/pybel/struct/mutation/__init__.py +++ b/src/pybel/struct/mutation/__init__.py @@ -2,11 +2,12 @@ """This module contains functions that mutate or make transformations on a network.""" -from . import collapse, deletion, expansion, induction, inference, metadata, transfer, utils +from . import collapse, deletion, expansion, induction, induction_expansion, inference, metadata, transfer, utils from .collapse import * from .deletion import * from .expansion import * from .induction import * +from .induction_expansion import * from .inference import * from .metadata import * from .transfer import * @@ -17,6 +18,7 @@ __all__ = ( deletion.__all__ + expansion.__all__ + induction.__all__ + + induction_expansion.__all__ + inference.__all__ + metadata.__all__ + transfer.__all__ + diff --git a/src/pybel/struct/mutation/collapse/collapse.py b/src/pybel/struct/mutation/collapse/collapse.py index db2d1482..904228be 100644 --- a/src/pybel/struct/mutation/collapse/collapse.py +++ b/src/pybel/struct/mutation/collapse/collapse.py @@ -10,11 +10,20 @@ from ....constants import HAS_VARIANT, RELATION, unqualified_edges __all__ = [ 'collapse_pair', 'collapse_nodes', - 'collapse_edges_passing_predicates', 'collapse_all_variants', ] +def _remove_self_edges(graph): + self_edges = [ + (u, u, k) + for u in graph + if u in graph[u] + for k in graph[u][u] + ] + graph.remove_edges_from(self_edges) + + @in_place_transformation def collapse_pair(graph, survivor, victim): """Rewire all edges from the synonymous 
node to the survivor node, then deletes the synonymous node. @@ -46,6 +55,8 @@ def collapse_pair(graph, survivor, victim): graph.remove_node(victim) +# TODO what happens when collapsing is not consistent? Need to build intermediate mappings and test their consistency. + @in_place_transformation def collapse_nodes(graph, survivor_mapping): """Collapse all nodes in values to the key nodes, in place. @@ -59,31 +70,20 @@ def collapse_nodes(graph, survivor_mapping): for victim in victims: collapse_pair(graph, survivor=survivor, victim=victim) - # Remove self edges - graph.remove_edges_from( - (u, u, k) - for u in graph - if u in graph[u] - for k in graph[u][u] - ) + _remove_self_edges(graph) @in_place_transformation -def collapse_edges_passing_predicates(graph, edge_predicates=None): - """Collapse all edges passing the given edge predicates. +def collapse_all_variants(graph): + """Collapse all genes', RNAs', miRNAs', and proteins' variants to their parents. :param pybel.BELGraph graph: A BEL Graph - :param edge_predicates: A predicate or list of predicates - :type edge_predicates: None or (pybel.BELGraph, tuple, tuple, int) -> bool or iter[(pybel.BELGraph, tuple, tuple, int) -> bool] """ - for u, v, _ in filter_edges(graph, edge_predicates=edge_predicates): - collapse_pair(graph, survivor=u, victim=v) + has_variant_predicate = build_relation_predicate(HAS_VARIANT) + edges = list(filter_edges(graph, has_variant_predicate)) -@in_place_transformation -def collapse_all_variants(graph): - """Collapse all genes', RNAs', miRNAs', and proteins' variants to their parents. 
+ for u, v, _ in edges: + collapse_pair(graph, survivor=u, victim=v) - :param pybel.BELGraph graph: A BEL Graph - """ - collapse_edges_passing_predicates(graph, build_relation_predicate(HAS_VARIANT)) + _remove_self_edges(graph) diff --git a/src/pybel/struct/mutation/induction/__init__.py b/src/pybel/struct/mutation/induction/__init__.py index f8eb2c66..20cbe05a 100644 --- a/src/pybel/struct/mutation/induction/__init__.py +++ b/src/pybel/struct/mutation/induction/__init__.py @@ -1,10 +1,18 @@ # -*- coding: utf-8 -*- -from . import upstream, utils +from . import annotations, citation, neighborhood, paths, upstream, utils +from .annotations import * +from .citation import * +from .neighborhood import * +from .paths import * from .upstream import * from .utils import * __all__ = ( + annotations.__all__ + + citation.__all__ + + neighborhood.__all__ + + paths.__all__ + upstream.__all__ + utils.__all__ ) diff --git a/src/pybel/struct/mutation/induction/annotations.py b/src/pybel/struct/mutation/induction/annotations.py new file mode 100644 index 00000000..2084d760 --- /dev/null +++ b/src/pybel/struct/mutation/induction/annotations.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + +import logging + +from .utils import get_subgraph_by_edge_filter +from ...filters import build_annotation_dict_all_filter, build_annotation_dict_any_filter +from ...pipeline import transformation + +log = logging.getLogger(__name__) + +__all__ = [ + 'get_subgraph_by_annotation_value', + 'get_subgraph_by_annotations', +] + + +@transformation +def get_subgraph_by_annotations(graph, annotations, or_=None): + """Induce a sub-graph given an annotations filter. + + :param graph: pybel.BELGraph graph: A BEL graph + :param dict[str,set[str]] annotations: Annotation filters (match all with :func:`pybel.utils.subdict_matches`) + :param boolean or_: if True any annotation should be present, if False all annotations should be present in the + edge. Defaults to True. 
+ :return: A subgraph of the original BEL graph + :rtype: pybel.BELGraph + """ + edge_filter_builder = ( + build_annotation_dict_any_filter + if (or_ is None or or_) else + build_annotation_dict_all_filter + ) + + return get_subgraph_by_edge_filter(graph, edge_filter_builder(annotations)) + + +@transformation +def get_subgraph_by_annotation_value(graph, annotation, value): + """Induce a sub-graph over all edges whose annotations match the given key and value. + + :param pybel.BELGraph graph: A BEL graph + :param str annotation: The annotation to group by + :param str value: The value for the annotation + :return: A subgraph of the original BEL graph + :rtype: pybel.BELGraph + """ + return get_subgraph_by_annotations(graph, {annotation: {value}}) diff --git a/src/pybel/struct/mutation/induction/citation.py b/src/pybel/struct/mutation/induction/citation.py new file mode 100644 index 00000000..4253b19e --- /dev/null +++ b/src/pybel/struct/mutation/induction/citation.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +"""Induction functions based on provenance information.""" + +import logging + +from .utils import get_subgraph_by_edge_filter +from ...filters.edge_predicate_builders import build_author_inclusion_filter, build_pmid_inclusion_filter +from ...pipeline import transformation + +__all__ = [ + 'get_subgraph_by_pubmed', + 'get_subgraph_by_authors', +] + +log = logging.getLogger(__name__) + + +@transformation +def get_subgraph_by_pubmed(graph, pubmed_identifiers): + """Induce a sub-graph over the edges retrieved from the given PubMed identifier(s). + + :param pybel.BELGraph graph: A BEL graph + :param str or list[str] pubmed_identifiers: A PubMed identifier or list of PubMed identifiers + :rtype: pybel.BELGraph + """ + return get_subgraph_by_edge_filter(graph, build_pmid_inclusion_filter(pubmed_identifiers)) + + +@transformation +def get_subgraph_by_authors(graph, authors): + """Induce a sub-graph over the edges retrieved publications by the given author(s). 
+ + :param pybel.BELGraph graph: A BEL graph + :param str or list[str] authors: An author or list of authors + :rtype: pybel.BELGraph + """ + return get_subgraph_by_edge_filter(graph, build_author_inclusion_filter(authors)) diff --git a/src/pybel/struct/mutation/induction/neighborhood.py b/src/pybel/struct/mutation/induction/neighborhood.py new file mode 100644 index 00000000..08969c6a --- /dev/null +++ b/src/pybel/struct/mutation/induction/neighborhood.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +"""Functions for selecting by the neighborhoods of nodes.""" + +import itertools as itt + +from ...pipeline import transformation +from ...utils import update_metadata, update_node_helper + +__all__ = [ + 'get_subgraph_by_neighborhood', +] + + +@transformation +def get_subgraph_by_neighborhood(graph, nodes): + """Get a BEL graph around the neighborhoods of the given nodes. Returns none if no nodes are in the graph. + + :param pybel.BELGraph graph: A BEL graph + :param iter[tuple] nodes: An iterable of BEL nodes + :return: A BEL graph induced around the neighborhoods of the given nodes + :rtype: Optional[pybel.BELGraph] + """ + node_set = set(nodes) + + if not any(node in graph for node in node_set): + return + + rv = graph.fresh_copy() + + rv.add_edges_from( + ( + (u, v, k, d) + if k < 0 else + (u, v, d) + ) + for u, v, k, d in itt.chain( + graph.in_edges_iter(nodes, keys=True, data=True), + graph.out_edges_iter(nodes, keys=True, data=True) + ) + ) + + update_node_helper(graph, rv) + update_metadata(graph, rv) + + return rv diff --git a/src/pybel/struct/mutation/induction/paths.py b/src/pybel/struct/mutation/induction/paths.py new file mode 100644 index 00000000..f3f2973d --- /dev/null +++ b/src/pybel/struct/mutation/induction/paths.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- + +"""Induction methods for graphs over shortest paths.""" + +import itertools as itt + +import logging +import networkx as nx + +from .utils import get_subgraph_by_induction +from ...pipeline 
import transformation +from ....constants import FUNCTION, PATHOLOGY + +__all__ = [ + 'get_nodes_in_all_shortest_paths', + 'get_subgraph_by_all_shortest_paths', +] + +log = logging.getLogger(__name__) + + +def _remove_pathologies_oop(graph): + """Remove pathology nodes from the graph.""" + rv = graph.copy() + for node, data in rv.nodes(data=True): + if data[FUNCTION] == PATHOLOGY: + rv.remove_node(node) + return rv + + +def _iterate_nodes_in_shortest_paths(graph, nodes, weight=None): + """Iterate over nodes in the shortest paths between all pairs of nodes in the given list. + + :type graph: pybel.BELGraph + :type nodes: list[tuple] + :param weight: Optional[str] + :rtype: iter[tuple] + """ + for source, target in itt.product(nodes, repeat=2): + try: + paths = nx.all_shortest_paths(graph, source, target, weight=weight) + for path in paths: + for node in path: + yield node + except nx.exception.NetworkXNoPath: + continue + + +def get_nodes_in_all_shortest_paths(graph, nodes, weight=None, remove_pathologies=False): + """Get a set of nodes in all shortest paths between the given nodes. + + Thinly wraps :func:`networkx.all_shortest_paths`. + + :param pybel.BELGraph graph: A BEL graph + :param iter[tuple] nodes: The list of nodes to use to use to find all shortest paths + :param Optional[str] weight: Edge data key corresponding to the edge weight. If none, uses unweighted search. + :param bool remove_pathologies: Should pathology nodes be removed first? + :return: A set of nodes appearing in the shortest paths between nodes in the BEL graph + :rtype: set[tuple] + + .. 
note:: This can be trivially parallelized using :func:`networkx.single_source_shortest_path` + """ + if remove_pathologies: + graph = _remove_pathologies_oop(graph) + + return set(_iterate_nodes_in_shortest_paths(graph, nodes, weight=weight)) + + +@transformation +def get_subgraph_by_all_shortest_paths(graph, nodes, weight=None, remove_pathologies=False): + """Induce a subgraph over the nodes in the pairwise shortest paths between all of the nodes in the given list. + + :param pybel.BELGraph graph: A BEL graph + :param iter[tuple] nodes: A set of nodes over which to calculate shortest paths + :param str weight: Edge data key corresponding to the edge weight. If None, performs unweighted search + :param bool remove_pathologies: Should the pathology nodes be deleted before getting shortest paths? + :return: A BEL graph induced over the nodes appearing in the shortest paths between the given nodes + :rtype: Optional[pybel.BELGraph] + """ + query_nodes = [] + + for node in nodes: + if node not in graph: + log.debug('%s not in %s', node, graph) + continue + query_nodes.append(node) + + if not query_nodes: + return + + induced_nodes = get_nodes_in_all_shortest_paths(graph, query_nodes, weight=weight, + remove_pathologies=remove_pathologies) + + if not induced_nodes: + return + + return get_subgraph_by_induction(graph, induced_nodes) diff --git a/src/pybel/struct/mutation/induction/utils.py b/src/pybel/struct/mutation/induction/utils.py index 85ca2a2f..4f07f159 100644 --- a/src/pybel/struct/mutation/induction/utils.py +++ b/src/pybel/struct/mutation/induction/utils.py @@ -5,6 +5,7 @@ from ...pipeline import transformation __all__ = [ 'get_subgraph_by_edge_filter', + 'get_subgraph_by_induction', ] @@ -21,3 +22,17 @@ def get_subgraph_by_edge_filter(graph, edge_predicates=None): rv = graph.fresh_copy() expand_by_edge_filter(graph, rv, edge_predicates=edge_predicates) return rv + + +@transformation +def get_subgraph_by_induction(graph, nodes): + """Induce a sub-graph over the 
given nodes or return None if none of the nodes are in the given graph. + + :param pybel.BELGraph graph: A BEL graph + :param iter[tuple] nodes: A list of BEL nodes in the graph + :rtype: Optional[pybel.BELGraph] + """ + if all(node not in graph for node in nodes): + return + + return graph.subgraph(nodes) diff --git a/src/pybel/struct/mutation/induction_expansion.py b/src/pybel/struct/mutation/induction_expansion.py new file mode 100644 index 00000000..d853f4b1 --- /dev/null +++ b/src/pybel/struct/mutation/induction_expansion.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +"""Functions for building graphs that use both expansion and induction procedures.""" + +import logging + +from .expansion import expand_all_node_neighborhoods +from .expansion.upstream import expand_downstream_causal, expand_upstream_causal +from .induction.neighborhood import get_subgraph_by_neighborhood +from .induction.upstream import get_downstream_causal_subgraph, get_upstream_causal_subgraph +from ..pipeline import transformation + +__all__ = [ + 'get_multi_causal_upstream', + 'get_multi_causal_downstream', + 'get_subgraph_by_second_neighbors', +] + +log = logging.getLogger(__name__) + + +@transformation +def get_multi_causal_upstream(graph, nbunch): + """Get the union of all the 2-level deep causal upstream subgraphs from the nbunch. + + :param pybel.BELGraph graph: A BEL graph + :param nbunch: A BEL node or list of BEL nodes + :type nbunch: tuple or list[tuple] + :return: A subgraph of the original BEL graph + :rtype: pybel.BELGraph + """ + result = get_upstream_causal_subgraph(graph, nbunch) + expand_upstream_causal(graph, result) + return result + + +@transformation +def get_multi_causal_downstream(graph, nbunch): + """Get the union of all of the 2-level deep causal downstream subgraphs from the nbunch. 
+ + :param pybel.BELGraph graph: A BEL graph + :param nbunch: A BEL node or list of BEL nodes + :type nbunch: tuple or list[tuple] + :return: A subgraph of the original BEL graph + :rtype: pybel.BELGraph + """ + result = get_downstream_causal_subgraph(graph, nbunch) + expand_downstream_causal(graph, result) + return result + + +@transformation +def get_subgraph_by_second_neighbors(graph, nodes, filter_pathologies=False): + """Get a graph around the neighborhoods of the given nodes and expand to the neighborhood of those nodes. + + Returns none if none of the nodes are in the graph. + + :param pybel.BELGraph graph: A BEL graph + :param iter[tuple] nodes: An iterable of BEL nodes + :param bool filter_pathologies: Should expansion take place around pathologies? + :return: A BEL graph induced around the neighborhoods of the given nodes + :rtype: Optional[pybel.BELGraph] + """ + result = get_subgraph_by_neighborhood(graph, nodes) + + if result is None: + return + + expand_all_node_neighborhoods(graph, result, filter_pathologies=filter_pathologies) + return result
Collapse_all_variants sometimes fails It appears non-deterministic, which is BAD
pybel/pybel
diff --git a/tests/test_dsl.py b/tests/test_dsl.py index 5a71b144..8df24c17 100644 --- a/tests/test_dsl.py +++ b/tests/test_dsl.py @@ -65,14 +65,27 @@ class TestDSL(unittest.TestCase): class TestCentralDogma(unittest.TestCase): - def get_parent(self): + """Test functions specific for :class:`CentralDogmaAbundance`s.""" + + def test_get_parent(self): + """Test the get_parent function in :class:`CentralDogmaAbundance`s.""" ab42 = protein(name='APP', namespace='HGNC', variants=[fragment(start=672, stop=713)]) app = ab42.get_parent() self.assertEqual('p(HGNC:APP)', app.as_bel()) + self.assertEqual('p(HGNC:APP, frag(672_713))', ab42.as_bel()) def test_with_variants(self): + """Test the `with_variant` function in :class:`CentralDogmaAbundance`s.""" + app = protein(name='APP', namespace='HGNC') + ab42 = app.with_variants(fragment(start=672, stop=713)) + self.assertEqual('p(HGNC:APP)', app.as_bel()) + self.assertEqual('p(HGNC:APP, frag(672_713))', ab42.as_bel()) + + def test_with_variants_list(self): + """Test the `with_variant` function in :class:`CentralDogmaAbundance`s.""" app = protein(name='APP', namespace='HGNC') ab42 = app.with_variants([fragment(start=672, stop=713)]) + self.assertEqual('p(HGNC:APP)', app.as_bel()) self.assertEqual('p(HGNC:APP, frag(672_713))', ab42.as_bel()) diff --git a/tests/test_struct/test_filters/test_edge_predicate_builders.py b/tests/test_struct/test_filters/test_edge_predicate_builders.py new file mode 100644 index 00000000..85c488fc --- /dev/null +++ b/tests/test_struct/test_filters/test_edge_predicate_builders.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- + +"""Tests for edge predicate builders.""" + +import unittest + +from pybel.constants import CITATION, CITATION_AUTHORS, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED +from pybel.struct.filters.edge_predicate_builders import ( + build_author_inclusion_filter, build_pmid_inclusion_filter, +) + +pmid1 = '1' +pmid2 = '2' +pmid3 = '3' + +author1 = '1' +author2 = '2' +author3 = 
'3' + + +class TestEdgePredicateBuilders(unittest.TestCase): + """Tests for edge predicate builders.""" + + def test_build_pmid_inclusion_filter(self): + pmid_inclusion_filter = build_pmid_inclusion_filter(pmid1) + + self.assertTrue(pmid_inclusion_filter({ + CITATION: { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: pmid1, + } + })) + + self.assertFalse(pmid_inclusion_filter({ + CITATION: { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: pmid2, + } + })) + + def test_build_pmid_set_inclusion_filter(self): + pmids = {pmid1, pmid2} + pmid_inclusion_filter = build_pmid_inclusion_filter(pmids) + + self.assertTrue(pmid_inclusion_filter({ + CITATION: { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: pmid1, + } + })) + + self.assertTrue(pmid_inclusion_filter({ + CITATION: { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: pmid2, + } + })) + + self.assertFalse(pmid_inclusion_filter({ + CITATION: { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: pmid3, + } + })) + + def test_build_author_inclusion_filter(self): + author_inclusion_filter = build_author_inclusion_filter(author1) + + self.assertTrue(author_inclusion_filter({ + CITATION: { + CITATION_AUTHORS: [author1] + } + })) + + self.assertTrue(author_inclusion_filter({ + CITATION: { + CITATION_AUTHORS: [author1, author2] + } + })) + + self.assertFalse(author_inclusion_filter({ + CITATION: { + CITATION_AUTHORS: [author3] + } + })) + + def test_build_author_set_inclusion_filter(self): + author = {author1, author2} + author_inclusion_filter = build_author_inclusion_filter(author) + + self.assertTrue(author_inclusion_filter({ + CITATION: { + CITATION_AUTHORS: [author1] + } + })) + + self.assertTrue(author_inclusion_filter({ + CITATION: { + CITATION_AUTHORS: [author1, author2] + } + })) + + self.assertFalse(author_inclusion_filter({ + CITATION: { + CITATION_AUTHORS: [author3] + } + })) diff --git a/tests/test_struct/test_filters/test_node_predicate_builders.py 
b/tests/test_struct/test_filters/test_node_predicate_builders.py index 1b520838..65cc7d4b 100644 --- a/tests/test_struct/test_filters/test_node_predicate_builders.py +++ b/tests/test_struct/test_filters/test_node_predicate_builders.py @@ -5,11 +5,14 @@ import unittest from pybel import BELGraph -from pybel.constants import GENE, PROTEIN +from pybel.constants import GENE, NAME, PROTEIN from pybel.dsl import bioprocess, gene, protein from pybel.struct import filter_nodes from pybel.struct.filters import invert_node_predicate -from pybel.struct.filters.node_predicate_builders import data_missing_key_builder, function_inclusion_filter_builder +from pybel.struct.filters.node_predicate_builders import ( + build_node_data_search, build_node_key_search, build_node_name_search, data_missing_key_builder, + function_inclusion_filter_builder, +) from pybel.testing.utils import n @@ -89,3 +92,55 @@ class TestNodePredicateBuilders(unittest.TestCase): self.assertNotIn(p1.as_tuple(), nodes) self.assertIn(p2.as_tuple(), nodes) + + def test_build_node_data_search(self): + """Test build_node_data_search.""" + + def test_key_predicate(datum): + """Check the data is greater than zero. 
+ + :rtype: bool + """ + return 0 < datum + + key = n() + + data_predicate = build_node_data_search(key, test_key_predicate) + + graph = BELGraph() + + p1 = protein('HGNC', n()) + graph.add_node_from_data(p1) + graph.node[p1.as_tuple()][key] = 0 + self.assertFalse(data_predicate(graph, p1.as_tuple())) + + p2 = protein('HGNC', n()) + graph.add_node_from_data(p2) + graph.node[p2.as_tuple()][key] = 5 + self.assertTrue(data_predicate(graph, p2.as_tuple())) + + p3 = protein('HGNC', n()) + graph.add_node_from_data(p3) + self.assertFalse(data_predicate(graph, p3.as_tuple())) + + def test_build_node_key_search(self): + """Test build_node_key_search.""" + node_key_search = build_node_key_search(query='app', key=NAME) + node_name_search = build_node_name_search(query='app') + + graph = BELGraph() + + p1 = protein('HGNC', 'APP') + graph.add_node_from_data(p1) + self.assertTrue(node_key_search(graph, p1.as_tuple())) + self.assertTrue(node_name_search(graph, p1.as_tuple())) + + p2 = protein('MGI', 'app') + graph.add_node_from_data(p2) + self.assertTrue(node_key_search(graph, p2.as_tuple())) + self.assertTrue(node_name_search(graph, p2.as_tuple())) + + p3 = protein('HGNC', 'nope') + graph.add_node_from_data(p3) + self.assertFalse(node_key_search(graph, p3.as_tuple())) + self.assertFalse(node_name_search(graph, p3.as_tuple())) diff --git a/tests/test_struct/test_filters/test_struct_node_predicates.py b/tests/test_struct/test_filters/test_struct_node_predicates.py index dd88de00..eadbb6c1 100644 --- a/tests/test_struct/test_filters/test_struct_node_predicates.py +++ b/tests/test_struct/test_filters/test_struct_node_predicates.py @@ -423,15 +423,15 @@ class TestEdgePredicate(unittest.TestCase): self.assertFalse(is_associative_relation({RELATION: DIRECTLY_DECREASES})) def test_build_is_association(self): + """Test build_relation_predicate.""" alternate_is_associative_relation = build_relation_predicate(ASSOCIATION) - self.assertTrue(alternate_is_associative_relation({RELATION: 
ASSOCIATION})) + g = BELGraph() + g.add_edge(p1.as_tuple(), p2.as_tuple(), key=0, **{RELATION: ASSOCIATION}) + g.add_edge(p2.as_tuple(), p3.as_tuple(), key=0, **{RELATION: INCREASES}) - self.assertFalse(alternate_is_associative_relation({RELATION: INCREASES})) - self.assertFalse(alternate_is_associative_relation({RELATION: CAUSES_NO_CHANGE})) - self.assertFalse(alternate_is_associative_relation({RELATION: DECREASES})) - self.assertFalse(alternate_is_associative_relation({RELATION: DIRECTLY_INCREASES})) - self.assertFalse(alternate_is_associative_relation({RELATION: DIRECTLY_DECREASES})) + self.assertTrue(alternate_is_associative_relation(g, p1.as_tuple(), p2.as_tuple(), 0)) + self.assertFalse(alternate_is_associative_relation(g, p2.as_tuple(), p3.as_tuple(), 0)) def test_has_degradation(self): self.assertTrue(edge_has_degradation({SUBJECT: {MODIFIER: DEGRADATION}})) diff --git a/tests/test_struct/test_transformations/test_collapse.py b/tests/test_struct/test_transformations/test_collapse.py index a1bb26dd..ef125844 100644 --- a/tests/test_struct/test_transformations/test_collapse.py +++ b/tests/test_struct/test_transformations/test_collapse.py @@ -4,7 +4,7 @@ import unittest from pybel import BELGraph from pybel.constants import DIRECTLY_INCREASES -from pybel.dsl import abundance, gene, hgvs, mirna, pathology, protein, rna +from pybel.dsl import abundance, gene, mirna, pathology, pmod, protein, rna from pybel.struct.mutation.collapse import collapse_all_variants, collapse_nodes, collapse_to_genes from pybel.testing.utils import n @@ -15,6 +15,7 @@ CHEBI = 'CHEBI' g1 = gene(HGNC, '1') r1 = rna(HGNC, '1') p1 = protein(HGNC, '1') +p1_phosphorylated = protein(HGNC, '1', variants=[pmod('Ph')]) g2 = gene(HGNC, '2') r2 = rna(HGNC, '2') @@ -96,10 +97,9 @@ class TestCollapseDownstream(unittest.TestCase): def test_collapse_all_variants(self): graph = BELGraph() - p1_variant = p1.with_variants(hgvs('?')) - graph.add_node_from_data(p1_variant) + 
graph.add_node_from_data(p1_phosphorylated) - graph.add_increases(p1_variant, p2, n(), n()) + graph.add_increases(p1_phosphorylated, p2, n(), n()) self.assertEqual(3, graph.number_of_nodes()) self.assertEqual(2, graph.number_of_edges()) @@ -110,5 +110,5 @@ class TestCollapseDownstream(unittest.TestCase): self.assertEqual(1, graph.number_of_edges()) self.assertIn(p1.as_tuple(), graph) - self.assertNotIn(p1_variant.as_tuple(), graph) + self.assertNotIn(p1_phosphorylated.as_tuple(), graph) self.assertIn(p2.as_tuple(), graph) diff --git a/tests/test_struct/test_transformations/test_induction.py b/tests/test_struct/test_transformations/test_induction.py index 79c91495..9ccd9ebe 100644 --- a/tests/test_struct/test_transformations/test_induction.py +++ b/tests/test_struct/test_transformations/test_induction.py @@ -1,12 +1,21 @@ # -*- coding: utf-8 -*- +"""Tests for PyBEL induction functions.""" + import string import unittest from pybel import BELGraph -from pybel.constants import ASSOCIATION, DECREASES, FUNCTION, INCREASES, PROTEIN +from pybel.constants import ( + ASSOCIATION, CITATION_AUTHORS, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED, + DECREASES, FUNCTION, INCREASES, PROTEIN, +) from pybel.dsl import gene, protein, rna -from pybel.struct.mutation import expand_upstream_causal, get_upstream_causal_subgraph +from pybel.struct.mutation.expansion import expand_upstream_causal +from pybel.struct.mutation.induction.citation import get_subgraph_by_authors, get_subgraph_by_pubmed +from pybel.struct.mutation.induction.paths import get_nodes_in_all_shortest_paths, get_subgraph_by_all_shortest_paths +from pybel.struct.mutation.induction.upstream import get_upstream_causal_subgraph +from pybel.struct.mutation.induction.utils import get_subgraph_by_induction from pybel.testing.utils import n trem2_gene = gene(namespace='HGNC', name='TREM2') @@ -14,7 +23,9 @@ trem2_rna = rna(namespace='HGNC', name='TREM2') trem2_protein = protein(namespace='HGNC', name='TREM2') -class 
TestInduction(unittest.TestCase): +class TestGraphMixin(unittest.TestCase): + """A mixin to enable testing nodes and edge membership in the graph.""" + def assertInGraph(self, node, graph): """Assert the node is in the graph. @@ -34,16 +45,76 @@ class TestInduction(unittest.TestCase): self.assertFalse(graph.has_node_with_data(node)) def assertInEdge(self, source, target, graph): - """ + """Assert the edge is in the graph. :param source: :param target: - :param graph: - :return: + :type graph: pybel.BELGraph + :rtype: bool """ self.assertIn(target.as_tuple(), graph[source.as_tuple()]) + +class TestInduction(TestGraphMixin): + """Test induction functions.""" + + def test_get_subgraph_by_induction(self): + """Test get_subgraph_by_induction.""" + graph = BELGraph() + keyword, url = n(), n() + graph.namespace_url[keyword] = url + a, b, c, d = [protein(namespace='test', name=n()) for _ in range(4)] + graph.add_directly_increases(a, b, n(), n()) + graph.add_directly_increases(b, c, n(), n()) + graph.add_directly_increases(c, d, n(), n()) + graph.add_increases(a, d, n(), n()) + + nodes = [b.as_tuple(), c.as_tuple()] + subgraph = get_subgraph_by_induction(graph, nodes) + + self.assertIn(keyword, subgraph.namespace_url) + self.assertEqual(url, subgraph.namespace_url[keyword]) + + self.assertNotInGraph(a, subgraph) + self.assertInGraph(b, subgraph) + self.assertInGraph(c, subgraph) + self.assertNotInGraph(d, subgraph) + + def test_get_subgraph_by_all_shortest_paths(self): + """Test get_subgraph_by_all_shortest_paths.""" + graph = BELGraph() + keyword, url = n(), n() + graph.namespace_url[keyword] = url + a, b, c, d, e, f = [protein(namespace='test', name=n()) for _ in range(6)] + graph.add_increases(a, b, n(), n()) + graph.add_increases(a, c, n(), n()) + graph.add_increases(b, d, n(), n()) + graph.add_increases(c, d, n(), n()) + graph.add_increases(a, e, n(), n()) + graph.add_increases(e, f, n(), n()) + graph.add_increases(f, d, n(), n()) + + query_nodes = [a.as_tuple(), 
d.as_tuple()] + shortest_paths_nodes = get_nodes_in_all_shortest_paths(graph, query_nodes) + self.assertIn(a.as_tuple(), shortest_paths_nodes) + self.assertIn(b.as_tuple(), shortest_paths_nodes) + self.assertIn(c.as_tuple(), shortest_paths_nodes) + self.assertIn(d.as_tuple(), shortest_paths_nodes) + + subgraph = get_subgraph_by_all_shortest_paths(graph, query_nodes) + + self.assertIn(keyword, subgraph.namespace_url) + self.assertEqual(url, subgraph.namespace_url[keyword]) + + self.assertInGraph(a, subgraph) + self.assertInGraph(b, subgraph) + self.assertInGraph(c, subgraph) + self.assertInGraph(d, subgraph) + self.assertNotInGraph(e, subgraph) + self.assertNotInGraph(f, subgraph) + def test_get_upstream_causal_subgraph(self): + """Test get_upstream_causal_subgraph.""" a, b, c, d, e, f = [protein(namespace='test', name=n()) for _ in range(6)] citation, evidence = '', '' @@ -110,3 +181,147 @@ class TestInduction(unittest.TestCase): self.assertInEdge(f, b, subgraph) self.assertEqual(2, len(subgraph[a.as_tuple()][b.as_tuple()])) self.assertEqual(4, subgraph.number_of_edges(), msg='\n'.join(map(str, subgraph.edges()))) + + +class TestEdgePredicateBuilders(TestGraphMixin): + """Tests for edge predicate builders.""" + + def test_build_pmid_inclusion_filter(self): + a, b, c, d = [protein(namespace='test', name=n()) for _ in range(4)] + p1, p2, p3, p4 = n(), n(), n(), n() + + graph = BELGraph() + keyword, url = n(), n() + graph.namespace_url[keyword] = url + graph.add_increases(a, b, n(), citation=p1) + graph.add_increases(a, b, n(), citation=p2) + graph.add_increases(b, c, n(), citation=p1) + graph.add_increases(b, c, n(), citation=p3) + graph.add_increases(c, d, n(), citation=p3) + + subgraph = get_subgraph_by_pubmed(graph, p1) + + self.assertIn(keyword, subgraph.namespace_url) + self.assertEqual(url, subgraph.namespace_url[keyword]) + + self.assertInGraph(a, subgraph) + self.assertInGraph(b, subgraph) + self.assertInGraph(c, subgraph) + self.assertNotInGraph(d, subgraph) 
+ + empty_subgraph = get_subgraph_by_pubmed(graph, p4) + self.assertIn(keyword, subgraph.namespace_url) + self.assertEqual(url, subgraph.namespace_url[keyword]) + self.assertEqual(0, empty_subgraph.number_of_nodes()) + + def test_build_pmid_set_inclusion_filter(self): + a, b, c, d, e, f = [protein(namespace='test', name=n()) for _ in range(6)] + p1, p2, p3, p4, p5, p6 = n(), n(), n(), n(), n(), n() + + graph = BELGraph() + keyword, url = n(), n() + graph.namespace_url[keyword] = url + graph.add_increases(a, b, n(), citation=p1) + graph.add_increases(a, b, n(), citation=p2) + graph.add_increases(b, c, n(), citation=p1) + graph.add_increases(b, c, n(), citation=p3) + graph.add_increases(c, d, n(), citation=p3) + graph.add_increases(e, f, n(), citation=p4) + + subgraph = get_subgraph_by_pubmed(graph, [p1, p4]) + + self.assertIn(keyword, subgraph.namespace_url) + self.assertEqual(url, subgraph.namespace_url[keyword]) + + self.assertInGraph(a, subgraph) + self.assertInGraph(b, subgraph) + self.assertInGraph(c, subgraph) + self.assertNotInGraph(d, subgraph) + self.assertInGraph(e, subgraph) + self.assertInGraph(f, subgraph) + + empty_subgraph = get_subgraph_by_pubmed(graph, [p5, p6]) + self.assertIn(keyword, subgraph.namespace_url) + self.assertEqual(url, subgraph.namespace_url[keyword]) + self.assertEqual(0, empty_subgraph.number_of_nodes()) + + def test_build_author_inclusion_filter(self): + a, b, c, d = [protein(namespace='test', name=n()) for _ in range(4)] + a1, a2, a3, a4, a5 = n(), n(), n(), n(), n() + + c1 = { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: n(), + CITATION_AUTHORS: [a1, a2, a3] + } + c2 = { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: n(), + CITATION_AUTHORS: [a1, a4] + } + + graph = BELGraph() + keyword, url = n(), n() + graph.namespace_url[keyword] = url + graph.add_increases(a, b, n(), citation=c1) + graph.add_increases(a, b, n(), citation=c2) + graph.add_increases(b, c, n(), citation=c1) + graph.add_increases(c, 
d, n(), citation=c2) + + subgraph1 = get_subgraph_by_authors(graph, a1) + + self.assertIn(keyword, subgraph1.namespace_url) + self.assertEqual(url, subgraph1.namespace_url[keyword]) + + self.assertInGraph(a, subgraph1) + self.assertInGraph(b, subgraph1) + self.assertInGraph(c, subgraph1) + self.assertInGraph(d, subgraph1) + + subgraph2 = get_subgraph_by_authors(graph, a2) + + self.assertIn(keyword, subgraph2.namespace_url) + self.assertEqual(url, subgraph2.namespace_url[keyword]) + + self.assertInGraph(a, subgraph2) + self.assertInGraph(b, subgraph2) + self.assertInGraph(c, subgraph2) + self.assertNotInGraph(d, subgraph2) + + subgraph3 = get_subgraph_by_authors(graph, a5) + self.assertIn(keyword, subgraph3.namespace_url) + self.assertEqual(url, subgraph3.namespace_url[keyword]) + self.assertEqual(0, subgraph3.number_of_nodes()) + + def test_build_author_set_inclusion_filter(self): + a, b, c, d = [protein(namespace='test', name=n()) for _ in range(4)] + a1, a2, a3, a4, a5 = n(), n(), n(), n(), n() + + c1 = { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: n(), + CITATION_AUTHORS: [a1, a2, a3] + } + c2 = { + CITATION_TYPE: CITATION_TYPE_PUBMED, + CITATION_REFERENCE: n(), + CITATION_AUTHORS: [a1, a4] + } + + graph = BELGraph() + keyword, url = n(), n() + graph.namespace_url[keyword] = url + graph.add_increases(a, b, n(), citation=c1) + graph.add_increases(a, b, n(), citation=c2) + graph.add_increases(b, c, n(), citation=c1) + graph.add_increases(c, d, n(), citation=c2) + + subgraph1 = get_subgraph_by_authors(graph, [a1, a2]) + + self.assertIn(keyword, subgraph1.namespace_url) + self.assertEqual(url, subgraph1.namespace_url[keyword]) + + self.assertInGraph(a, subgraph1) + self.assertInGraph(b, subgraph1) + self.assertInGraph(c, subgraph1) + self.assertInGraph(d, subgraph1)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 8 }
0.11
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==8.1.8 coverage==7.2.7 decorator==5.1.1 execnet==2.0.2 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core greenlet==3.1.1 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work networkx==1.11 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work -e git+https://github.com/pybel/pybel.git@dca028340c84849e7b7bf6999b84b093dd52edf0#egg=PyBEL pyparsing==3.1.4 pytest==7.1.2 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 requests==2.31.0 requests-file==2.1.0 six==1.17.0 SQLAlchemy==2.0.40 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work tqdm==4.67.1 typing_extensions==4.7.1 urllib3==2.0.7 zipp @ file:///croot/zipp_1672387121353/work
name: pybel channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==3.4.1 - click==8.1.8 - coverage==7.2.7 - decorator==5.1.1 - execnet==2.0.2 - greenlet==3.1.1 - idna==3.10 - networkx==1.11 - pyparsing==3.1.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - requests==2.31.0 - requests-file==2.1.0 - six==1.17.0 - sqlalchemy==2.0.40 - tqdm==4.67.1 - typing-extensions==4.7.1 - urllib3==2.0.7 prefix: /opt/conda/envs/pybel
[ "tests/test_dsl.py::TestDSL::test_as_tuple", "tests/test_dsl.py::TestDSL::test_complex_with_name", "tests/test_dsl.py::TestDSL::test_str_has_both", "tests/test_dsl.py::TestDSL::test_str_has_identifier", "tests/test_dsl.py::TestDSL::test_str_has_name", "tests/test_dsl.py::TestCentralDogma::test_get_parent", "tests/test_dsl.py::TestCentralDogma::test_with_variants", "tests/test_dsl.py::TestCentralDogma::test_with_variants_list", "tests/test_struct/test_filters/test_edge_predicate_builders.py::TestEdgePredicateBuilders::test_build_author_inclusion_filter", "tests/test_struct/test_filters/test_edge_predicate_builders.py::TestEdgePredicateBuilders::test_build_author_set_inclusion_filter", "tests/test_struct/test_filters/test_edge_predicate_builders.py::TestEdgePredicateBuilders::test_build_pmid_inclusion_filter", "tests/test_struct/test_filters/test_edge_predicate_builders.py::TestEdgePredicateBuilders::test_build_pmid_set_inclusion_filter", "tests/test_struct/test_filters/test_node_predicate_builders.py::TestNodePredicateBuilders::test_build_node_data_search", "tests/test_struct/test_filters/test_node_predicate_builders.py::TestNodePredicateBuilders::test_build_node_key_search", "tests/test_struct/test_filters/test_node_predicate_builders.py::TestNodePredicateBuilders::test_data_missing_key_builder", "tests/test_struct/test_filters/test_node_predicate_builders.py::TestNodePredicateBuilders::test_empty_list_error", "tests/test_struct/test_filters/test_node_predicate_builders.py::TestNodePredicateBuilders::test_multiple", "tests/test_struct/test_filters/test_node_predicate_builders.py::TestNodePredicateBuilders::test_single", "tests/test_struct/test_filters/test_node_predicate_builders.py::TestNodePredicateBuilders::test_type_error", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_causal_source", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_fragments", 
"tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_g1_variants", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_node_exclusion_data", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_node_exclusion_tuples", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_node_inclusion_data", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_node_inclusion_tuples", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_none", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_object_has_secretion", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_object_has_translocation", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_p1_active", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_p1_data_variants", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_p1_tuple_variants", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_p2_data_variants", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_p2_tuple_variants", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_p3", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestNodePredicate::test_subject_has_secretion", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_build_is_association", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_activity", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_annotation", 
"tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_authors", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_degradation", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_polarity", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_polarity_dict", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_provenance", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_pubmed", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_has_translocation", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_is_association", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_is_causal", "tests/test_struct/test_filters/test_struct_node_predicates.py::TestEdgePredicate::test_is_direct_causal", "tests/test_struct/test_transformations/test_collapse.py::TestCollapseDownstream::test_collapse_1", "tests/test_struct/test_transformations/test_collapse.py::TestCollapseDownstream::test_collapse_all_variants", "tests/test_struct/test_transformations/test_collapse.py::TestCollapseDownstream::test_collapse_dogma_1", "tests/test_struct/test_transformations/test_collapse.py::TestCollapseDownstream::test_collapse_dogma_2", "tests/test_struct/test_transformations/test_collapse.py::TestCollapseDownstream::test_collapse_dogma_3", "tests/test_struct/test_transformations/test_induction.py::TestInduction::test_expand_upstream_causal_subgraph", "tests/test_struct/test_transformations/test_induction.py::TestInduction::test_get_subgraph_by_all_shortest_paths", "tests/test_struct/test_transformations/test_induction.py::TestInduction::test_get_subgraph_by_induction", 
"tests/test_struct/test_transformations/test_induction.py::TestInduction::test_get_upstream_causal_subgraph", "tests/test_struct/test_transformations/test_induction.py::TestEdgePredicateBuilders::test_build_author_inclusion_filter", "tests/test_struct/test_transformations/test_induction.py::TestEdgePredicateBuilders::test_build_author_set_inclusion_filter", "tests/test_struct/test_transformations/test_induction.py::TestEdgePredicateBuilders::test_build_pmid_inclusion_filter", "tests/test_struct/test_transformations/test_induction.py::TestEdgePredicateBuilders::test_build_pmid_set_inclusion_filter" ]
[]
[]
[]
MIT License
2,803
[ "src/pybel/struct/mutation/induction/neighborhood.py", "src/pybel/dsl/nodes.py", "src/pybel/struct/mutation/induction/annotations.py", "src/pybel/struct/mutation/induction/citation.py", "src/pybel/struct/graph.py", "src/pybel/struct/mutation/induction/paths.py", "src/pybel/struct/filters/edge_predicate_builders.py", "src/pybel/struct/mutation/induction/utils.py", "src/pybel/struct/mutation/induction/__init__.py", "src/pybel/struct/filters/node_predicate_builders.py", "src/pybel/struct/mutation/induction_expansion.py", "src/pybel/struct/mutation/__init__.py", "src/pybel/struct/mutation/collapse/collapse.py" ]
[ "src/pybel/struct/mutation/induction/neighborhood.py", "src/pybel/dsl/nodes.py", "src/pybel/struct/mutation/induction/annotations.py", "src/pybel/struct/mutation/induction/citation.py", "src/pybel/struct/graph.py", "src/pybel/struct/mutation/induction/paths.py", "src/pybel/struct/filters/edge_predicate_builders.py", "src/pybel/struct/mutation/induction/utils.py", "src/pybel/struct/mutation/induction/__init__.py", "src/pybel/struct/filters/node_predicate_builders.py", "src/pybel/struct/mutation/induction_expansion.py", "src/pybel/struct/mutation/__init__.py", "src/pybel/struct/mutation/collapse/collapse.py" ]
airspeed-velocity__asv-680
8f3e6786472a96c79446f6ca4d56707c90a76f22
2018-07-21 15:24:37
a42330248214dbd70595f4dff8b549d1f6c58db4
diff --git a/asv/results.py b/asv/results.py index 76dbe6b..8513b71 100644 --- a/asv/results.py +++ b/asv/results.py @@ -4,6 +4,7 @@ from __future__ import (absolute_import, division, print_function, unicode_literals) +import sys import base64 import os import zlib @@ -405,15 +406,31 @@ class Results(object): self._benchmark_version[benchmark_name] = benchmark_version if 'profile' in result and result['profile']: - self._profiles[benchmark_name] = base64.b64encode( + profile_data = base64.b64encode( zlib.compress(result['profile'])) + if sys.version_info[0] >= 3: + profile_data = profile_data.decode('ascii') + self._profiles[benchmark_name] = profile_data def get_profile(self, benchmark_name): """ Get the profile data for the given benchmark name. + + Parameters + ---------- + benchmark_name : str + Name of benchmark + + Returns + ------- + profile_data : bytes + Raw profile data + """ - return zlib.decompress( - base64.b64decode(self._profiles[benchmark_name])) + profile_data = self._profiles[benchmark_name] + if sys.version_info[0] >= 3: + profile_data = profile_data.encode('ascii') + return zlib.decompress(base64.b64decode(profile_data)) def has_profile(self, benchmark_name): """ diff --git a/asv/util.py b/asv/util.py index 748b9ba..34abb88 100644 --- a/asv/util.py +++ b/asv/util.py @@ -643,7 +643,11 @@ def write_json(path, data, api_version=None): data = dict(data) data['version'] = api_version - with long_path_open(path, 'w') as fd: + open_kwargs = {} + if sys.version_info[0] >= 3: + open_kwargs['encoding'] = 'utf-8' + + with long_path_open(path, 'w', **open_kwargs) as fd: json.dump(data, fd, indent=4, sort_keys=True) @@ -656,7 +660,11 @@ def load_json(path, api_version=None, cleanup=True): path = os.path.abspath(path) - with long_path_open(path, 'r') as fd: + open_kwargs = {} + if sys.version_info[0] >= 3: + open_kwargs['encoding'] = 'utf-8' + + with long_path_open(path, 'r', **open_kwargs) as fd: content = fd.read() if cleanup:
JSON serialization problem "TypeError: Object of type 'bytes' is not JSON serializable" during asv run --profile Python 3.6
airspeed-velocity/asv
diff --git a/test/test_results.py b/test/test_results.py index 2a7c0fb..d983a4a 100644 --- a/test/test_results.py +++ b/test/test_results.py @@ -35,13 +35,13 @@ def test_results(tmpdir): values = { 'suite1.benchmark1': {'result': [float(i * 0.001)], 'stats': [{'foo': 1}], 'samples': [[1,2]], 'number': [6], 'params': [['a']], - 'version': "1"}, + 'version': "1", 'profile': b'\x00\xff'}, 'suite1.benchmark2': {'result': [float(i * i * 0.001)], 'stats': [{'foo': 2}], 'samples': [[3,4]], 'number': [7], 'params': [], - 'version': "1"}, + 'version': "1", 'profile': b'\x00\xff'}, 'suite2.benchmark1': {'result': [float((i + 1) ** -1)], 'stats': [{'foo': 3}], 'samples': [[5,6]], 'number': [8], 'params': [['c']], - 'version': None} + 'version': None, 'profile': b'\x00\xff'} } for key, val in values.items(): @@ -66,6 +66,7 @@ def test_results(tmpdir): assert rr._stats == r._stats assert rr._number == r._number assert rr._samples == r._samples + assert rr._profiles == r._profiles assert rr.started_at == r._started_at assert rr.ended_at == r._ended_at assert rr.benchmark_version == r._benchmark_version @@ -87,6 +88,9 @@ def test_results(tmpdir): assert r2.get_result_stats(bench, bad_params) == [None, None] assert r2.get_result_samples(bench, bad_params) == ([None, None], [None, None]) + # Get profile + assert r2.get_profile(bench) == b'\x00\xff' + # Check get_result_keys mock_benchmarks = { 'suite1.benchmark1': {'version': '1'}, diff --git a/test/test_util.py b/test/test_util.py index d86167a..19c8ba7 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -269,3 +269,13 @@ def test_is_main_thread(): thread.join() assert results == [False] + + +def test_json_non_ascii(tmpdir): + non_ascii_data = [{'😼': '難', 'ä': 3}] + + fn = os.path.join(str(tmpdir), "nonascii.json") + util.write_json(fn, non_ascii_data) + data = util.load_json(fn) + + assert data == non_ascii_data diff --git a/test/test_workflow.py b/test/test_workflow.py index 2959f00..5e3983d 100644 --- 
a/test/test_workflow.py +++ b/test/test_workflow.py @@ -102,7 +102,7 @@ def test_run_publish(capfd, basic_conf): # Tests a typical complete run/publish workflow tools.run_asv_with_conf(conf, 'run', "master~5..master", '--steps=2', - '--quick', '--show-stderr', + '--quick', '--show-stderr', '--profile', _machine_file=machine_file) text, err = capfd.readouterr()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-xdist", "pytest-rerunfailures", "selenium", "feedparser", "python-hglib" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/airspeed-velocity/asv.git@8f3e6786472a96c79446f6ca4d56707c90a76f22#egg=asv attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 execnet==1.9.0 feedparser==6.0.11 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-rerunfailures==10.3 pytest-xdist==3.0.2 python-hglib==2.6.2 selenium==3.141.0 sgmllib3k==1.0.0 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: asv channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - execnet==1.9.0 - feedparser==6.0.11 - pytest-rerunfailures==10.3 - pytest-xdist==3.0.2 - python-hglib==2.6.2 - selenium==3.141.0 - sgmllib3k==1.0.0 - six==1.17.0 - urllib3==1.26.20 prefix: /opt/conda/envs/asv
[ "test/test_results.py::test_results" ]
[ "test/test_workflow.py::test_run_publish", "test/test_workflow.py::test_continuous", "test/test_workflow.py::test_find", "test/test_workflow.py::test_run_spec", "test/test_workflow.py::test_run_build_failure", "test/test_workflow.py::test_run_with_repo_subdir", "test/test_workflow.py::test_benchmark_param_selection" ]
[ "test/test_results.py::test_get_result_hash_from_prefix", "test/test_results.py::test_backward_compat_load", "test/test_results.py::test_json_timestamp", "test/test_results.py::test_iter_results", "test/test_util.py::test_parallelfailure", "test/test_util.py::test_write_unicode_to_ascii", "test/test_util.py::test_which_path", "test/test_util.py::test_write_load_json", "test/test_util.py::test_human_float", "test/test_util.py::test_human_time", "test/test_util.py::test_human_file_size", "test/test_util.py::test_is_main_thread", "test/test_util.py::test_json_non_ascii" ]
[]
BSD 3-Clause "New" or "Revised" License
2,804
[ "asv/util.py", "asv/results.py" ]
[ "asv/util.py", "asv/results.py" ]
airspeed-velocity__asv-683
47b320d6252fba81581bf37b50571bcca578096d
2018-07-21 17:52:33
a42330248214dbd70595f4dff8b549d1f6c58db4
diff --git a/asv/benchmark.py b/asv/benchmark.py index 20cf86f..8f6932f 100644 --- a/asv/benchmark.py +++ b/asv/benchmark.py @@ -470,10 +470,12 @@ class TimeBenchmark(Benchmark): samples = [s/number for s in samples] return {'samples': samples, 'number': number} - def benchmark_timing(self, timer, repeat, warmup_time, number=0): + def benchmark_timing(self, timer, repeat, warmup_time, number=0, + min_timeit_count=2): sample_time = self.sample_time start_time = time.time() + timeit_count = 0 if repeat == 0: # automatic number of samples: 10 is large enough to @@ -483,6 +485,8 @@ class TimeBenchmark(Benchmark): def too_slow(timing): # stop taking samples if limits exceeded + if timeit_count < min_timeit_count: + return False if default_number: t = 1.3*sample_time max_time = start_time + min(warmup_time + repeat * t, @@ -508,6 +512,7 @@ class TimeBenchmark(Benchmark): timing = timer.timeit(number) wall_time = time.time() - start actual_timing = max(wall_time, timing) + min_timeit_count += 1 if actual_timing >= sample_time: if time.time() > start_time + warmup_time: @@ -526,8 +531,10 @@ class TimeBenchmark(Benchmark): while True: self._redo_setup_next = False timing = timer.timeit(number) + min_timeit_count += 1 if time.time() >= start_time + warmup_time: break + if too_slow(timing): return [timing], number @@ -535,6 +542,7 @@ class TimeBenchmark(Benchmark): samples = [] for j in range(repeat): timing = timer.timeit(number) + min_timeit_count += 1 samples.append(timing) if too_slow(timing): diff --git a/asv/benchmarks.py b/asv/benchmarks.py index 8dacfbb..0dfb0ee 100644 --- a/asv/benchmarks.py +++ b/asv/benchmarks.py @@ -81,8 +81,6 @@ def run_benchmark(benchmark, root, env, show_stderr=False, - `samples`: List of lists of sampled raw data points, if benchmark produces those and was successful. - - `number`: Repeact count associated with each sample. - - `stats`: List of results of statistical analysis of data. 
- `profile`: If `profile` is `True` and run was at least partially successful, @@ -126,8 +124,8 @@ def run_benchmark(benchmark, root, env, show_stderr=False, if (selected_idx is not None and benchmark['params'] and param_idx not in selected_idx): # Use NaN to mark the result as skipped - bench_results.append(dict(samples=None, number=None, - result=float('nan'), stats=None)) + bench_results.append(dict(samples=None, result=float('nan'), + stats=None)) bench_profiles.append(None) continue success, data, profile_data, err, out, errcode = \ @@ -139,14 +137,13 @@ def run_benchmark(benchmark, root, env, show_stderr=False, total_count += 1 if success: if isinstance(data, dict) and 'samples' in data: - value, stats = statistics.compute_stats(data['samples']) + value, stats = statistics.compute_stats(data['samples'], + data['number']) result_data = dict(samples=data['samples'], - number=data['number'], result=value, stats=stats) else: result_data = dict(samples=None, - number=None, result=data, stats=None) @@ -155,7 +152,7 @@ def run_benchmark(benchmark, root, env, show_stderr=False, bench_profiles.append(profile_data) else: failure_count += 1 - bench_results.append(dict(samples=None, number=None, result=None, stats=None)) + bench_results.append(dict(samples=None, result=None, stats=None)) bench_profiles.append(None) if data is not None: bad_output = data @@ -181,7 +178,7 @@ def run_benchmark(benchmark, root, env, show_stderr=False, result['stderr'] += err # Produce result - for key in ['samples', 'number', 'result', 'stats']: + for key in ['samples', 'result', 'stats']: result[key] = [x[key] for x in bench_results] if benchmark['params']: diff --git a/asv/commands/run.py b/asv/commands/run.py index 69b73d2..dec7ab8 100644 --- a/asv/commands/run.py +++ b/asv/commands/run.py @@ -307,7 +307,6 @@ class Run(Command): for benchmark_name, d in six.iteritems(results): if not record_samples: d['samples'] = None - d['number'] = None benchmark_version = 
benchmarks[benchmark_name]['version'] result.add_result(benchmark_name, d, benchmark_version) diff --git a/asv/results.py b/asv/results.py index 8513b71..f7a4c70 100644 --- a/asv/results.py +++ b/asv/results.py @@ -213,7 +213,6 @@ class Results(object): self._date = date self._results = {} self._samples = {} - self._number = {} self._stats = {} self._benchmark_params = {} self._profiles = {} @@ -345,17 +344,11 @@ class Results(object): samples : {None, list} Raw result samples. If the benchmark is parameterized, return a list of values. - number : int - Associated repeat count """ - samples = _compatible_results(self._samples[key], - self._benchmark_params[key], - params) - number = _compatible_results(self._number[key], - self._benchmark_params[key], - params) - return samples, number + return _compatible_results(self._samples[key], + self._benchmark_params[key], + params) def get_result_params(self, key): """ @@ -370,7 +363,6 @@ class Results(object): del self._results[key] del self._benchmark_params[key] del self._samples[key] - del self._number[key] del self._stats[key] # Remove profiles (may be missing) @@ -398,7 +390,6 @@ class Results(object): """ self._results[benchmark_name] = result['result'] self._samples[benchmark_name] = result['samples'] - self._number[benchmark_name] = result['number'] self._stats[benchmark_name] = result['stats'] self._benchmark_params[benchmark_name] = result['params'] self._started_at[benchmark_name] = util.datetime_to_js_timestamp(result['started_at']) @@ -455,8 +446,6 @@ class Results(object): value = {'result': self._results[key]} if self._samples[key] and any(x is not None for x in self._samples[key]): value['samples'] = self._samples[key] - if self._number[key] and any(x is not None for x in self._number[key]): - value['number'] = self._number[key] if self._stats[key] and any(x is not None for x in self._stats[key]): value['stats'] = self._stats[key] if self._benchmark_params[key]: @@ -528,14 +517,13 @@ class Results(object): 
obj._results = {} obj._samples = {} - obj._number = {} obj._stats = {} obj._benchmark_params = {} for key, value in six.iteritems(d['results']): # Backward compatibility if not isinstance(value, dict): - value = {'result': [value], 'samples': None, 'number': None, + value = {'result': [value], 'samples': None, 'stats': None, 'params': []} if not isinstance(value['result'], list): @@ -545,14 +533,12 @@ class Results(object): value['stats'] = [value['stats']] value.setdefault('samples', None) - value.setdefault('number', None) value.setdefault('stats', None) value.setdefault('params', []) # Assign results obj._results[key] = value['result'] obj._samples[key] = value['samples'] - obj._number[key] = value['number'] obj._stats[key] = value['stats'] obj._benchmark_params[key] = value['params'] @@ -580,7 +566,7 @@ class Results(object): Add any existing old results that aren't overridden by the current results. """ - for dict_name in ('_samples', '_number', '_stats', + for dict_name in ('_samples', '_stats', '_benchmark_params', '_profiles', '_started_at', '_ended_at', '_benchmark_version'): old_dict = getattr(old, dict_name) diff --git a/asv/statistics.py b/asv/statistics.py index 3ef9e06..f05989e 100644 --- a/asv/statistics.py +++ b/asv/statistics.py @@ -11,7 +11,7 @@ import math from .util import inf, nan -def compute_stats(samples): +def compute_stats(samples, number): """ Statistical analysis of measured samples. @@ -19,6 +19,8 @@ def compute_stats(samples): ---------- samples : list of float List of total times (y) of benchmarks. + number : int + Repeat number for each sample. Returns ------- @@ -72,7 +74,8 @@ def compute_stats(samples): 'max': max(Y), 'mean': mean, 'std': std, - 'n': len(Y)} + 'repeat': len(Y), + 'number': number} return result, stats diff --git a/docs/source/dev.rst b/docs/source/dev.rst index 31fea20..7afae00 100644 --- a/docs/source/dev.rst +++ b/docs/source/dev.rst @@ -145,15 +145,11 @@ A benchmark suite directory has the following layout. 
The This key is omitted if there are no samples recorded. - - ``number``: contains the repeat count(s) associated with the - measured samples. Same format as for ``result``. - - This key is omitted if there are no samples recorded. - - ``stats``: dictionary containing results of statistical analysis. Contains keys ``ci_99`` (confidence interval estimate for the result), ``q_25``, ``q_75`` (percentiles), - ``min``, ``max``, ``mean``, ``std``, and ``n``. + ``min``, ``max``, ``mean``, ``std``, ``repeat``, and + ``number``. This key is omitted if there is no statistical analysis.
Big time difference for benchmarks that require high warmup I was running a benchmark with latest asv 0.3dev0 and I observed a big time difference from asv 0.2. * 0.2 https://github.com/poliastro/poliastro-benchmarks/blob/df6a71330f08c0c0ae1369818bc5af8106bba62f/results/ks1/d32f3ab8-conda-py3.6-matplotlib2.1-nomkl-numba-numpy1.14-scipy1.0.json#L31 * 0.3dev0 https://github.com/poliastro/poliastro-benchmarks/blob/f47fb57f2db0d76649ae7337867221b3738f057a/results/ks1/d3238535-conda-py3.6-matplotlib2.1-nomkl-numba-numpy1.14-scipy1.0.json#L34 After doing a git bisect, I arrived to [this commit](https://github.com/airspeed-velocity/asv/commit/db5cc300d39eb890292cf979d90f255b57dc9887) and [pull request](https://github.com/airspeed-velocity/asv/pull/493), which as far as I understand has not been backported to 0.2 yet. This particular benchmark calls a function accelerated with `@jit(nopython=True)`, so I suspected that warmup time had something to do with it. After playing a little bit with the parameters, I found that this diff brought back the old time measurement with asv 0.3dev0: ```diff diff --git a/benchmarks/examples.py b/benchmarks/examples.py index 13e86d8..d034eed 100644 --- a/benchmarks/examples.py +++ b/benchmarks/examples.py @@ -3,3 +3,5 @@ from poliastro.examples import iss def time_propagate_iss_one_period(): iss.propagate(iss.period) + +time_propagate_iss_one_period.repeat = 3 ``` So there is ~~a UX issue and~~ a measurement issue: * ~~The UX issue is that, as far as I know, there's no way to know _a posteriori_ how many repetitions, number of samples, etc did asv compute for a benchmark, as this information is not stored in the results.~~ * The measurement issue is that, with the current benchmark methodology, if a function requires a "big" warmup time (in this case, doing the numba JIT compilation), it will be run only once, which might or might not be representative of the intended result.
airspeed-velocity/asv
diff --git a/test/test_results.py b/test/test_results.py index d983a4a..485bfd0 100644 --- a/test/test_results.py +++ b/test/test_results.py @@ -34,13 +34,13 @@ def test_results(tmpdir): values = { 'suite1.benchmark1': {'result': [float(i * 0.001)], 'stats': [{'foo': 1}], - 'samples': [[1,2]], 'number': [6], 'params': [['a']], + 'samples': [[1,2]], 'params': [['a']], 'version': "1", 'profile': b'\x00\xff'}, 'suite1.benchmark2': {'result': [float(i * i * 0.001)], 'stats': [{'foo': 2}], - 'samples': [[3,4]], 'number': [7], 'params': [], + 'samples': [[3,4]], 'params': [], 'version': "1", 'profile': b'\x00\xff'}, 'suite2.benchmark1': {'result': [float((i + 1) ** -1)], 'stats': [{'foo': 3}], - 'samples': [[5,6]], 'number': [8], 'params': [['c']], + 'samples': [[5,6]], 'params': [['c']], 'version': None, 'profile': b'\x00\xff'} } @@ -64,7 +64,6 @@ def test_results(tmpdir): for rr in [r2, r3]: assert rr._results == r._results assert rr._stats == r._stats - assert rr._number == r._number assert rr._samples == r._samples assert rr._profiles == r._profiles assert rr.started_at == r._started_at @@ -79,14 +78,13 @@ def test_results(tmpdir): assert params == values[bench]['params'] assert r2.get_result_value(bench, params) == values[bench]['result'] assert r2.get_result_stats(bench, params) == values[bench]['stats'] - assert r2.get_result_samples(bench, params) == (values[bench]['samples'], - values[bench]['number']) + assert r2.get_result_samples(bench, params) == values[bench]['samples'] # Get with different parameters than stored (should return n/a) bad_params = [['foo', 'bar']] assert r2.get_result_value(bench, bad_params) == [None, None] assert r2.get_result_stats(bench, bad_params) == [None, None] - assert r2.get_result_samples(bench, bad_params) == ([None, None], [None, None]) + assert r2.get_result_samples(bench, bad_params) == [None, None] # Get profile assert r2.get_profile(bench) == b'\x00\xff' @@ -150,7 +148,6 @@ def test_json_timestamp(tmpdir): 'params': [], 
'stats': None, 'samples': None, - 'number': None, 'started_at': stamp1, 'ended_at': stamp2 } diff --git a/test/test_statistics.py b/test/test_statistics.py index 2bba2ad..6a19a28 100644 --- a/test/test_statistics.py +++ b/test/test_statistics.py @@ -30,14 +30,15 @@ except ImportError: def test_compute_stats(): np.random.seed(1) - assert statistics.compute_stats([]) == (None, None) - assert statistics.compute_stats([15.0]) == (15.0, None) + assert statistics.compute_stats([], 1) == (None, None) + assert statistics.compute_stats([15.0], 1) == (15.0, None) for nsamples, true_mean in product([10, 50, 250], [0, 0.3, 0.6]): samples = np.random.randn(nsamples) + true_mean - result, stats = statistics.compute_stats(samples) + result, stats = statistics.compute_stats(samples, 42) - assert np.allclose(stats['n'], len(samples)) + assert stats['repeat'] == len(samples) + assert stats['number'] == 42 assert np.allclose(stats['mean'], np.mean(samples)) assert np.allclose(stats['q_25'], np.percentile(samples, 25)) assert np.allclose(stats['q_75'], np.percentile(samples, 75)) @@ -64,8 +65,8 @@ def test_is_different(): for true_mean, n, significant in [(0.05, 10, False), (0.05, 100, True), (0.1, 10, True)]: samples_a = 0 + 0.1 * np.random.rand(n) samples_b = true_mean + 0.1 * np.random.rand(n) - result_a, stats_a = statistics.compute_stats(samples_a) - result_b, stats_b = statistics.compute_stats(samples_b) + result_a, stats_a = statistics.compute_stats(samples_a, 1) + result_b, stats_b = statistics.compute_stats(samples_b, 1) assert statistics.is_different(stats_a, stats_b) == significant
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 6 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/airspeed-velocity/asv.git@47b320d6252fba81581bf37b50571bcca578096d#egg=asv exceptiongroup==1.2.2 execnet==2.1.1 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-xdist==3.6.1 six==1.17.0 tomli==2.2.1
name: asv channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - execnet==2.1.1 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-xdist==3.6.1 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/asv
[ "test/test_results.py::test_results", "test/test_results.py::test_json_timestamp" ]
[]
[ "test/test_results.py::test_get_result_hash_from_prefix", "test/test_results.py::test_backward_compat_load", "test/test_results.py::test_iter_results", "test/test_statistics.py::test_quantile_ci_small", "test/test_statistics.py::test_quantile_ci_r", "test/test_statistics.py::test_laplace_posterior_basic" ]
[]
BSD 3-Clause "New" or "Revised" License
2,806
[ "asv/statistics.py", "asv/commands/run.py", "asv/results.py", "asv/benchmarks.py", "asv/benchmark.py", "docs/source/dev.rst" ]
[ "asv/statistics.py", "asv/commands/run.py", "asv/results.py", "asv/benchmarks.py", "asv/benchmark.py", "docs/source/dev.rst" ]
jonathanj__eliottree-68
983999c822a83258d4f3bc3086925649bcf672f2
2018-07-21 19:01:29
86b5043c1c308624ae29de82606e96d5a65f76f4
diff --git a/eliottree/_cli.py b/eliottree/_cli.py index 0325c73..f10389e 100644 --- a/eliottree/_cli.py +++ b/eliottree/_cli.py @@ -77,12 +77,14 @@ def display_tasks(tasks, color, ignored_fields, field_limit, human_readable): the task trees to stdout. """ write = text_writer(sys.stdout).write + write_err = text_writer(sys.stderr).write if color == 'auto': colorize = sys.stdout.isatty() else: colorize = color == 'always' render_tasks( write=write, + write_err=write_err, tasks=tasks, ignored_fields=set(ignored_fields) or None, field_limit=field_limit, diff --git a/eliottree/_render.py b/eliottree/_render.py index 59ee982..c8fc3b9 100644 --- a/eliottree/_render.py +++ b/eliottree/_render.py @@ -1,13 +1,17 @@ +import sys +import traceback from functools import partial from eliot._action import WrittenAction from eliot._message import WrittenMessage from eliot._parse import Task +from six import text_type from termcolor import colored -from toolz import compose, identity +from toolz import compose, excepts, identity from tree_format import format_tree from eliottree import format +from eliottree._util import eliot_ns, format_namespace, is_namespace RIGHT_DOUBLE_ARROW = u'\u21d2' @@ -33,6 +37,7 @@ class COLORS(object): success = Color('green') failure = Color('red') prop = Color('blue') + error = Color('red', ['bold']) def __init__(self, colored): self.colored = colored @@ -52,7 +57,7 @@ def _default_value_formatter(human_readable, field_limit, encoding='utf-8'): fields = {} if human_readable: fields = { - u'timestamp': format.timestamp(), + eliot_ns(u'timestamp'): format.timestamp(), } return compose( # We want tree-format to handle newlines. 
@@ -126,6 +131,8 @@ def format_node(format_value, colors, node): value = u'' else: value = format_value(value, key) + if is_namespace(key): + key = format_namespace(key) return u'{}: {}'.format( colors.prop(format.escape_control_characters(key)), value) @@ -138,13 +145,17 @@ def message_fields(message, ignored_fields): """ def _items(): try: - yield u'timestamp', message.timestamp + yield eliot_ns('timestamp'), message.timestamp except KeyError: pass for key, value in message.contents.items(): if key not in ignored_fields: yield key, value - return sorted(_items()) if message else [] + + def _sortkey(x): + k = x[0] + return format_namespace(k) if is_namespace(k) else k + return sorted(_items(), key=_sortkey) if message else [] def get_children(ignored_fields, node): @@ -176,8 +187,20 @@ def get_children(ignored_fields, node): return [] +def track_exceptions(f, caught, default=None): + """ + Decorate ``f`` with a function that traps exceptions and appends them to + ``caught``, returning ``default`` in their place. + """ + def _catch(_): + caught.append(sys.exc_info()) + return default + return excepts(Exception, f, _catch) + + def render_tasks(write, tasks, field_limit=0, ignored_fields=None, - human_readable=False, colorize=False): + human_readable=False, colorize=False, write_err=None, + format_node=format_node, format_value=None): """ Render Eliot tasks as an ASCII tree. @@ -193,18 +216,44 @@ def render_tasks(write, tasks, field_limit=0, ignored_fields=None, most Eliot metadata. :param bool human_readable: Render field values as human-readable? :param bool colorize: Colorized the output? + :type write_err: Callable[[`text_type`], None] + :param write_err: Callable used to write errors. + :param format_node: See `format_node`. + :type format_value: Callable[[Any], `text_type`] + :param format_value: Callable to format a value. 
""" if ignored_fields is None: ignored_fields = DEFAULT_IGNORED_KEYS - _format_node = partial( - format_node, - _default_value_formatter(human_readable=human_readable, - field_limit=field_limit), - COLORS(colored if colorize else _no_color)) + colors = COLORS(colored if colorize else _no_color) + caught_exceptions = [] + if format_value is None: + format_value = _default_value_formatter( + human_readable=human_readable, + field_limit=field_limit) + _format_value = track_exceptions( + format_value, + caught_exceptions, + u'<value formatting exception>') + _format_node = track_exceptions( + partial(format_node, _format_value, colors), + caught_exceptions, + u'<node formatting exception>') _get_children = partial(get_children, ignored_fields) for task in tasks: write(format_tree(task, _format_node, _get_children)) write(u'\n') + if write_err and caught_exceptions: + write_err( + colors.error( + u'Exceptions ({}) occurred during processing:\n'.format( + len(caught_exceptions)))) + for exc in caught_exceptions: + for line in traceback.format_exception(*exc): + if not isinstance(line, text_type): + line = line.decode('utf-8') + write_err(line) + write_err(u'\n') + __all__ = ['render_tasks'] diff --git a/eliottree/_util.py b/eliottree/_util.py new file mode 100644 index 0000000..3ef7008 --- /dev/null +++ b/eliottree/_util.py @@ -0,0 +1,36 @@ +from collections import namedtuple + + +namespace = namedtuple('namespace', ['prefix', 'name']) + + +def namespaced(prefix): + """ + Create a function that creates new names in the ``prefix`` namespace. + + :rtype: Callable[[unicode], `namespace`] + """ + return lambda name: namespace(prefix, name) + + +def format_namespace(ns): + """ + Format a `namespace`. + + :rtype: unicode + """ + if not is_namespace(ns): + raise TypeError('Expected namespace', ns) + return u'{}/{}'.format(ns.prefix, ns.name) + + +def is_namespace(x): + """ + Is this a `namespace` instance? 
+ + :rtype: bool + """ + return isinstance(x, namespace) + + +eliot_ns = namespaced(u'eliot') diff --git a/eliottree/render.py b/eliottree/render.py index 58d2992..e12f3aa 100644 --- a/eliottree/render.py +++ b/eliottree/render.py @@ -7,6 +7,7 @@ from tree_format import format_tree from eliottree._render import ( COLORS, DEFAULT_IGNORED_KEYS, _default_value_formatter, _no_color) +from eliottree._util import is_namespace, eliot_ns from eliottree.format import escape_control_characters @@ -81,7 +82,10 @@ def get_name_factory(colors): if isinstance(task, text_type): return escape_control_characters(task) elif isinstance(task, tuple): - name = escape_control_characters(task[0]) + key = task[0] + if is_namespace(key): + key = key.name + name = escape_control_characters(key) if isinstance(task[1], dict): return name elif isinstance(task[1], text_type): @@ -129,6 +133,8 @@ def get_children_factory(ignored_task_keys, format_value): def items_children(items): for key, value in sorted(items): if key not in ignored_task_keys: + if key == u'timestamp': + key = eliot_ns(key) if isinstance(value, dict): yield key, value else: @@ -150,7 +156,10 @@ def get_children_factory(ignored_task_keys, format_value): return else: for child in items_children(task.task.items()): - yield child + if child[0] == u'timestamp': + yield eliot_ns(child[0]), child[1] + else: + yield child for child in task.children(): yield child return get_children diff --git a/setup.py b/setup.py index e47a7ef..85950e5 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ setup( "six>=1.9.0", "jmespath>=0.7.1", "iso8601>=0.1.10", - "tree-format>=0.1.2", + "tree-format>=0.1.1", "termcolor>=1.1.0", "toolz>=0.8.2", "eliot>=0.12.0",
Handle exceptions during formatting more gracefully Sometimes timestamps are missing / None for example: ``` File "/home/mithrandi/code/eliottree/eliottree/format.py", line 47, in _format_timestamp_value result = datetime.utcfromtimestamp(float(value)).isoformat(' ') TypeError: float() argument must be a string or a number ``` `--raw` will work around this but then you get no formatting for _anything_.
jonathanj/eliottree
diff --git a/eliottree/test/test_cli.py b/eliottree/test/test_cli.py index 0d7d9bd..1612f1b 100644 --- a/eliottree/test/test_cli.py +++ b/eliottree/test/test_cli.py @@ -13,9 +13,9 @@ from eliottree.test.tasks import message_task, missing_uuid_task rendered_message_task = ( u'cdeb220d-7605-4d5f-8341-1a170222e308\n' u'\u2514\u2500\u2500 twisted:log/1\n' + u' \u251c\u2500\u2500 eliot/timestamp: 2015-03-03 04:25:00\n' u' \u251c\u2500\u2500 error: False\n' - u' \u251c\u2500\u2500 message: Main loop terminated.\n' - u' \u2514\u2500\u2500 timestamp: 2015-03-03 04:25:00\n\n' + u' \u2514\u2500\u2500 message: Main loop terminated.\n\n' ).encode('utf-8') diff --git a/eliottree/test/test_render.py b/eliottree/test/test_render.py index 3946f7b..0d612d7 100644 --- a/eliottree/test/test_render.py +++ b/eliottree/test/test_render.py @@ -4,13 +4,15 @@ from termcolor import colored from testtools import ExpectedException, TestCase from testtools.matchers import AfterPreprocessing as After from testtools.matchers import ( - Contains, EndsWith, Equals, HasLength, IsDeprecated, StartsWith) + Contains, EndsWith, Equals, HasLength, IsDeprecated, MatchesAll, + MatchesListwise, StartsWith) from eliottree import ( Tree, render_task_nodes, render_tasks, tasks_from_iterable) from eliottree._render import ( COLORS, RIGHT_DOUBLE_ARROW, _default_value_formatter, _no_color, format_node, get_children, message_fields, message_name) +from eliottree._util import eliot_ns from eliottree.render import get_name_factory from eliottree.test.matchers import ExactlyEquals from eliottree.test.tasks import ( @@ -72,7 +74,7 @@ class DefaultValueFormatterTests(TestCase): def test_timestamp_field(self): """ - Format ``timestamp`` fields as human-readable if the feature was + Format Eliot ``timestamp`` fields as human-readable if the feature was requested. 
""" format_value = _default_value_formatter( @@ -80,12 +82,26 @@ class DefaultValueFormatterTests(TestCase): # datetime(2015, 6, 6, 22, 57, 12) now = 1433631432 self.assertThat( - format_value(now, u'timestamp'), + format_value(now, eliot_ns(u'timestamp')), ExactlyEquals(u'2015-06-06 22:57:12')) self.assertThat( - format_value(str(now), u'timestamp'), + format_value(str(now), eliot_ns(u'timestamp')), ExactlyEquals(u'2015-06-06 22:57:12')) + def test_not_eliot_timestamp_field(self): + """ + Do not format user fields named ``timestamp``. + """ + format_value = _default_value_formatter( + human_readable=True, field_limit=0) + now = 1433631432 + self.assertThat( + format_value(now, u'timestamp'), + ExactlyEquals(text_type(now))) + self.assertThat( + format_value(text_type(now), u'timestamp'), + ExactlyEquals(text_type(now))) + def test_timestamp_field_not_human(self): """ Do not format ``timestamp`` fields as human-readable if the feature was @@ -668,8 +684,9 @@ class MessageFieldsTests(TestCase): message = WrittenMessage.from_dict({u'a': 1, u'timestamp': 12345678}) self.assertThat( message_fields(message, set()), - Equals([(u'a', 1), - (u'timestamp', 12345678)])) + Equals([ + (u'a', 1), + (eliot_ns(u'timestamp'), 12345678)])) def test_ignored_fields(self): """ @@ -711,7 +728,7 @@ class GetChildrenTests(TestCase): Equals([ (u'action_status', start_message.contents.action_status), (u'action_type', start_message.contents.action_type), - (u'timestamp', start_message.timestamp)])) + (eliot_ns(u'timestamp'), start_message.timestamp)])) def test_written_action_start(self): """ @@ -726,7 +743,7 @@ class GetChildrenTests(TestCase): Equals([ (u'action_status', start_message.contents.action_status), (u'action_type', start_message.contents.action_type), - (u'timestamp', start_message.timestamp)])) + (eliot_ns(u'timestamp'), start_message.timestamp)])) def test_written_action_children(self): """ @@ -822,10 +839,47 @@ class RenderTasksTests(TestCase): """ def render_tasks(self, 
iterable, **kw): fd = StringIO() + err = StringIO(u'') tasks = tasks_from_iterable(iterable) - render_tasks(write=fd.write, tasks=tasks, **kw) + render_tasks(write=fd.write, write_err=err.write, tasks=tasks, **kw) + if err.tell(): + return fd.getvalue(), err.getvalue() return fd.getvalue() + def test_format_node_failures(self): + """ + Catch exceptions when formatting nodes and display a message without + interrupting the processing of tasks. List all caught exceptions to + stderr. + """ + def bad_format_node(*a, **kw): + raise ValueError('Nope') + self.assertThat( + self.render_tasks([message_task], + format_node=bad_format_node), + MatchesListwise([ + Contains(u'<node formatting exception>'), + MatchesAll( + Contains(u'Traceback (most recent call last):'), + Contains(u'ValueError: Nope'))])) + + def test_format_value_failures(self): + """ + Catch exceptions when formatting node values and display a message + without interrupting the processing of tasks. List all caught + exceptions to stderr. 
+ """ + def bad_format_value(*a, **kw): + raise ValueError('Nope') + self.assertThat( + self.render_tasks([message_task], + format_value=bad_format_value), + MatchesListwise([ + Contains(u'message: <value formatting exception>'), + MatchesAll( + Contains(u'Traceback (most recent call last):'), + Contains(u'ValueError: Nope'))])) + def test_tasks(self): """ Render two tasks of sequential levels, by default most standard Eliot @@ -836,9 +890,9 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n' u'\u2514\u2500\u2500 app:action/1 \u21d2 started\n' - u' \u251c\u2500\u2500 timestamp: 1425356800\n' + u' \u251c\u2500\u2500 eliot/timestamp: 1425356800\n' u' \u2514\u2500\u2500 app:action/2 \u21d2 succeeded\n' - u' \u2514\u2500\u2500 timestamp: 1425356800\n\n')) + u' \u2514\u2500\u2500 eliot/timestamp: 1425356800\n\n')) def test_tasks_human_readable(self): """ @@ -851,9 +905,11 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n' u'\u2514\u2500\u2500 app:action/1 \u21d2 started\n' - u' \u251c\u2500\u2500 timestamp: 2015-03-03 04:26:40\n' + u' \u251c\u2500\u2500 eliot/timestamp: ' + u'2015-03-03 04:26:40\n' u' \u2514\u2500\u2500 app:action/2 \u21d2 succeeded\n' - u' \u2514\u2500\u2500 timestamp: 2015-03-03 04:26:40\n' + u' \u2514\u2500\u2500 eliot/timestamp: ' + u'2015-03-03 04:26:40\n' u'\n')) def test_multiline_field(self): @@ -871,9 +927,9 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n' u'\u2514\u2500\u2500 app:action/1 \u21d2 started\n' - u' \u251c\u2500\u2500 message: this is a\u23ce\n' - u' \u2502 many line message\n' - u' \u2514\u2500\u2500 timestamp: 1425356800\n\n')) + u' \u251c\u2500\u2500 eliot/timestamp: 1425356800\n' + u' \u2514\u2500\u2500 message: this is a\u23ce\n' + u' many line message\n\n')) def test_multiline_field_limit(self): """ @@ -886,8 +942,8 @@ class RenderTasksTests(TestCase): ExactlyEquals( 
u'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n' u'\u2514\u2500\u2500 app:action/1 \u21d2 started\n' - u' \u251c\u2500\u2500 message: this is a\u2026\n' - u' \u2514\u2500\u2500 timestamp: 1425356800\n\n')) + u' \u251c\u2500\u2500 eliot/timestamp: 1425356800\n' + u' \u2514\u2500\u2500 message: this is a\u2026\n\n')) def test_field_limit(self): """ @@ -899,9 +955,9 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'cdeb220d-7605-4d5f-8341-1a170222e308\n' u'\u2514\u2500\u2500 twisted:log/1\n' + u' \u251c\u2500\u2500 eliot/timestamp: 14253\u2026\n' u' \u251c\u2500\u2500 error: False\n' - u' \u251c\u2500\u2500 message: Main \u2026\n' - u' \u2514\u2500\u2500 timestamp: 14253\u2026\n\n')) + u' \u2514\u2500\u2500 message: Main \u2026\n\n')) def test_ignored_keys(self): """ @@ -914,7 +970,7 @@ class RenderTasksTests(TestCase): u'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n' u'\u2514\u2500\u2500 app:action/1 \u21d2 started\n' u' \u251c\u2500\u2500 action_status: started\n' - u' \u2514\u2500\u2500 timestamp: 1425356800\n\n')) + u' \u2514\u2500\u2500 eliot/timestamp: 1425356800\n\n')) def test_task_data(self): """ @@ -925,9 +981,9 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'cdeb220d-7605-4d5f-8341-1a170222e308\n' u'\u2514\u2500\u2500 twisted:log/1\n' + u' \u251c\u2500\u2500 eliot/timestamp: 1425356700\n' u' \u251c\u2500\u2500 error: False\n' - u' \u251c\u2500\u2500 message: Main loop terminated.\n' - u' \u2514\u2500\u2500 timestamp: 1425356700\n\n')) + u' \u2514\u2500\u2500 message: Main loop terminated.\n\n')) def test_dict_data(self): """ @@ -938,9 +994,9 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n' u'\u2514\u2500\u2500 app:action/1 \u21d2 started\n' - u' \u251c\u2500\u2500 some_data: \n' - u' \u2502 \u2514\u2500\u2500 a: 42\n' - u' \u2514\u2500\u2500 timestamp: 1425356800\n\n')) + u' \u251c\u2500\u2500 eliot/timestamp: 1425356800\n' + u' \u2514\u2500\u2500 some_data: \n' + u' \u2514\u2500\u2500 a: 42\n\n')) def 
test_list_data(self): """ @@ -951,10 +1007,10 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n' u'\u2514\u2500\u2500 app:action/1 \u21d2 started\n' - u' \u251c\u2500\u2500 some_data: \n' - u' \u2502 \u251c\u2500\u2500 0: a\n' - u' \u2502 \u2514\u2500\u2500 1: b\n' - u' \u2514\u2500\u2500 timestamp: 1425356800\n\n')) + u' \u251c\u2500\u2500 eliot/timestamp: 1425356800\n' + u' \u2514\u2500\u2500 some_data: \n' + u' \u251c\u2500\u2500 0: a\n' + u' \u2514\u2500\u2500 1: b\n\n')) def test_nested(self): """ @@ -965,9 +1021,9 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'f3a32bb3-ea6b-457c-aa99-08a3d0491ab4\n' u'\u2514\u2500\u2500 app:action/1 \u21d2 started\n' - u' \u251c\u2500\u2500 timestamp: 1425356800\n' + u' \u251c\u2500\u2500 eliot/timestamp: 1425356800\n' u' \u2514\u2500\u2500 app:action:nest/1/1 \u21d2 started\n' - u' \u2514\u2500\u2500 timestamp: 1425356900\n\n')) + u' \u2514\u2500\u2500 eliot/timestamp: 1425356900\n\n')) def test_janky_message(self): """ @@ -979,10 +1035,10 @@ class RenderTasksTests(TestCase): ExactlyEquals( u'cdeb220d-7605-4d5f-\u241b(08341-1a170222e308\n' u'\u2514\u2500\u2500 M\u241b(0/1\n' + u' \u251c\u2500\u2500 eliot/timestamp: 1425356700\n' u' \u251c\u2500\u2500 er\u241bror: False\n' - u' \u251c\u2500\u2500 mes\u240asage: ' - u'Main loop\u241b(0terminated.\n' - u' \u2514\u2500\u2500 timestamp: 1425356700\n\n')) + u' \u2514\u2500\u2500 mes\u240asage: ' + u'Main loop\u241b(0terminated.\n\n')) def test_janky_action(self): """ @@ -996,8 +1052,9 @@ class RenderTasksTests(TestCase): u'\u2514\u2500\u2500 A\u241b(0/1 \u21d2 started\n' u' \u251c\u2500\u2500 \u241b(0: \n' u' \u2502 \u2514\u2500\u2500 \u241b(0: nope\n' - u' \u251c\u2500\u2500 mes\u240asage: hello\u241b(0world\n' - u' \u2514\u2500\u2500 timestamp: 1425356800\u241b(0\n\n')) + u' \u251c\u2500\u2500 eliot/timestamp: 1425356800\u241b(0\n' + u' \u2514\u2500\u2500 mes\u240asage: hello\u241b(0world\n\n' + )) def test_colorize(self): """ @@ 
-1013,11 +1070,11 @@ class RenderTasksTests(TestCase): colors.parent(u'app:action'), colors.success(u'started')), u' \u251c\u2500\u2500 {}: {}'.format( - colors.prop(u'timestamp'), u'1425356800'), + colors.prop(u'eliot/timestamp'), u'1425356800'), u' \u2514\u2500\u2500 {}/2 \u21d2 {}'.format( colors.parent(u'app:action'), colors.success(u'succeeded')), u' \u2514\u2500\u2500 {}: {}'.format( - colors.prop('timestamp'), u'1425356800'), + colors.prop('eliot/timestamp'), u'1425356800'), u'\n', ]))) diff --git a/eliottree/test/test_util.py b/eliottree/test/test_util.py new file mode 100644 index 0000000..c75cdde --- /dev/null +++ b/eliottree/test/test_util.py @@ -0,0 +1,64 @@ +from testtools import ExpectedException, TestCase +from testtools.matchers import AfterPreprocessing as After +from testtools.matchers import ( + Equals, Is, MatchesAll, MatchesListwise, MatchesPredicate, + MatchesStructure) + +from eliottree._util import format_namespace, is_namespace, namespaced + + +class NamespaceTests(TestCase): + """ + Tests for `namespaced`, `format_namespace` and `is_namespace`. + """ + def test_namespaced(self): + """ + `namespaced` creates a function that when called produces a namespaced + name. + """ + self.assertThat( + namespaced(u'foo'), + MatchesAll( + MatchesPredicate(callable, '%s is not callable'), + After( + lambda f: f(u'bar'), + MatchesAll( + MatchesListwise([ + Equals(u'foo'), + Equals(u'bar')]), + MatchesStructure( + prefix=Equals(u'foo'), + name=Equals(u'bar')))))) + + def test_format_not_namespace(self): + """ + `format_namespace` raises `TypeError` if its argument is not a + namespaced name. + """ + with ExpectedException(TypeError): + format_namespace(42) + + def test_format_namespace(self): + """ + `format_namespace` creates a text representation of a namespaced name. 
+ """ + self.assertThat( + format_namespace(namespaced(u'foo')(u'bar')), + Equals(u'foo/bar')) + + def test_is_namespace(self): + """ + `is_namespace` returns ``True`` only for namespaced names. + """ + self.assertThat( + is_namespace(42), + Is(False)) + self.assertThat( + is_namespace((u'foo', u'bar')), + Is(False)) + self.assertThat( + is_namespace(namespaced(u'foo')), + Is(False)) + self.assertThat( + is_namespace(namespaced(u'foo')(u'bar')), + Is(True))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 4 }
17.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
boltons==25.0.0 eliot==1.17.5 -e git+https://github.com/jonathanj/eliottree.git@983999c822a83258d4f3bc3086925649bcf672f2#egg=eliot_tree exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work iso8601==2.1.0 jmespath==1.0.1 orjson==3.10.16 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pyrsistent==0.20.0 pytest @ file:///croot/pytest_1738938843180/work six==1.17.0 termcolor==3.0.0 testtools==2.7.2 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work toolz==1.0.0 tree-format==0.1.2 zope.interface==7.2
name: eliottree channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - boltons==25.0.0 - eliot==1.17.5 - iso8601==2.1.0 - jmespath==1.0.1 - orjson==3.10.16 - pyrsistent==0.20.0 - six==1.17.0 - termcolor==3.0.0 - testtools==2.7.2 - toolz==1.0.0 - tree-format==0.1.2 - zope-interface==7.2 prefix: /opt/conda/envs/eliottree
[ "eliottree/test/test_cli.py::EndToEndTests::test_eliot_parse_error", "eliottree/test/test_cli.py::EndToEndTests::test_file", "eliottree/test/test_cli.py::EndToEndTests::test_json_parse_error", "eliottree/test/test_cli.py::EndToEndTests::test_stdin", "eliottree/test/test_render.py::DefaultValueFormatterTests::test_anything", "eliottree/test/test_render.py::DefaultValueFormatterTests::test_bytes", "eliottree/test/test_render.py::DefaultValueFormatterTests::test_not_eliot_timestamp_field", "eliottree/test/test_render.py::DefaultValueFormatterTests::test_timestamp_field", "eliottree/test/test_render.py::DefaultValueFormatterTests::test_timestamp_field_not_human", "eliottree/test/test_render.py::DefaultValueFormatterTests::test_unicode", "eliottree/test/test_render.py::DefaultValueFormatterTests::test_unicode_control_characters", "eliottree/test/test_render.py::RenderTaskNodesTests::test_colorize", "eliottree/test/test_render.py::RenderTaskNodesTests::test_deprecated", "eliottree/test/test_render.py::RenderTaskNodesTests::test_dict_data", "eliottree/test/test_render.py::RenderTaskNodesTests::test_field_limit", "eliottree/test/test_render.py::RenderTaskNodesTests::test_ignored_keys", "eliottree/test/test_render.py::RenderTaskNodesTests::test_janky_action", "eliottree/test/test_render.py::RenderTaskNodesTests::test_janky_message", "eliottree/test/test_render.py::RenderTaskNodesTests::test_multiline_field", "eliottree/test/test_render.py::RenderTaskNodesTests::test_multiline_field_limit", "eliottree/test/test_render.py::RenderTaskNodesTests::test_nested", "eliottree/test/test_render.py::RenderTaskNodesTests::test_task_data", "eliottree/test/test_render.py::RenderTaskNodesTests::test_tasks", "eliottree/test/test_render.py::RenderTaskNodesTests::test_tasks_human_readable", "eliottree/test/test_render.py::GetNameFactoryTests::test_node", "eliottree/test/test_render.py::GetNameFactoryTests::test_node_failure", 
"eliottree/test/test_render.py::GetNameFactoryTests::test_node_success", "eliottree/test/test_render.py::GetNameFactoryTests::test_text", "eliottree/test/test_render.py::GetNameFactoryTests::test_tuple_dict", "eliottree/test/test_render.py::GetNameFactoryTests::test_tuple_root", "eliottree/test/test_render.py::GetNameFactoryTests::test_tuple_unicode", "eliottree/test/test_render.py::MessageNameTests::test_action_status", "eliottree/test/test_render.py::MessageNameTests::test_action_status_failed", "eliottree/test/test_render.py::MessageNameTests::test_action_status_success", "eliottree/test/test_render.py::MessageNameTests::test_action_task_level", "eliottree/test/test_render.py::MessageNameTests::test_action_type", "eliottree/test/test_render.py::MessageNameTests::test_message_task_level", "eliottree/test/test_render.py::MessageNameTests::test_message_type", "eliottree/test/test_render.py::MessageNameTests::test_unknown", "eliottree/test/test_render.py::FormatNodeTests::test_other", "eliottree/test/test_render.py::FormatNodeTests::test_task", "eliottree/test/test_render.py::FormatNodeTests::test_tuple_dict", "eliottree/test/test_render.py::FormatNodeTests::test_tuple_list", "eliottree/test/test_render.py::FormatNodeTests::test_tuple_other", "eliottree/test/test_render.py::FormatNodeTests::test_written_action", "eliottree/test/test_render.py::MessageFieldsTests::test_empty", "eliottree/test/test_render.py::MessageFieldsTests::test_fields", "eliottree/test/test_render.py::MessageFieldsTests::test_ignored_fields", "eliottree/test/test_render.py::GetChildrenTests::test_other", "eliottree/test/test_render.py::GetChildrenTests::test_task_action", "eliottree/test/test_render.py::GetChildrenTests::test_tuple_dict", "eliottree/test/test_render.py::GetChildrenTests::test_tuple_list", "eliottree/test/test_render.py::GetChildrenTests::test_written_action_children", "eliottree/test/test_render.py::GetChildrenTests::test_written_action_end", 
"eliottree/test/test_render.py::GetChildrenTests::test_written_action_ignored_fields", "eliottree/test/test_render.py::GetChildrenTests::test_written_action_no_children", "eliottree/test/test_render.py::GetChildrenTests::test_written_action_no_end", "eliottree/test/test_render.py::GetChildrenTests::test_written_action_start", "eliottree/test/test_render.py::GetChildrenTests::test_written_message", "eliottree/test/test_render.py::RenderTasksTests::test_colorize", "eliottree/test/test_render.py::RenderTasksTests::test_dict_data", "eliottree/test/test_render.py::RenderTasksTests::test_field_limit", "eliottree/test/test_render.py::RenderTasksTests::test_format_node_failures", "eliottree/test/test_render.py::RenderTasksTests::test_format_value_failures", "eliottree/test/test_render.py::RenderTasksTests::test_ignored_keys", "eliottree/test/test_render.py::RenderTasksTests::test_janky_action", "eliottree/test/test_render.py::RenderTasksTests::test_janky_message", "eliottree/test/test_render.py::RenderTasksTests::test_list_data", "eliottree/test/test_render.py::RenderTasksTests::test_multiline_field", "eliottree/test/test_render.py::RenderTasksTests::test_multiline_field_limit", "eliottree/test/test_render.py::RenderTasksTests::test_nested", "eliottree/test/test_render.py::RenderTasksTests::test_task_data", "eliottree/test/test_render.py::RenderTasksTests::test_tasks", "eliottree/test/test_render.py::RenderTasksTests::test_tasks_human_readable", "eliottree/test/test_util.py::NamespaceTests::test_format_namespace", "eliottree/test/test_util.py::NamespaceTests::test_format_not_namespace", "eliottree/test/test_util.py::NamespaceTests::test_is_namespace", "eliottree/test/test_util.py::NamespaceTests::test_namespaced" ]
[]
[]
[]
MIT License
2,807
[ "setup.py", "eliottree/_cli.py", "eliottree/_util.py", "eliottree/render.py", "eliottree/_render.py" ]
[ "setup.py", "eliottree/_cli.py", "eliottree/_util.py", "eliottree/render.py", "eliottree/_render.py" ]
pybel__pybel-319
499c96c87bd4e94854f509b83d221a357902c57d
2018-07-21 20:29:04
6b0eb5dcb19400f3a64ac4830747bfe8dcbe8141
codecov[bot]: # [Codecov](https://codecov.io/gh/pybel/pybel/pull/319?src=pr&el=h1) Report > :exclamation: No coverage uploaded for pull request base (`develop@45580a5`). [Click here to learn what that means](https://docs.codecov.io/docs/error-reference#section-missing-base-commit). > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/pybel/pybel/pull/319/graphs/tree.svg?width=650&src=pr&token=J7joRTRygG&height=150)](https://codecov.io/gh/pybel/pybel/pull/319?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## develop #319 +/- ## ========================================== Coverage ? 87.66% ========================================== Files ? 124 Lines ? 6122 Branches ? 922 ========================================== Hits ? 5367 Misses ? 573 Partials ? 182 ``` | [Impacted Files](https://codecov.io/gh/pybel/pybel/pull/319?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [src/pybel/manager/cache\_manager.py](https://codecov.io/gh/pybel/pybel/pull/319/diff?src=pr&el=tree#diff-c3JjL3B5YmVsL21hbmFnZXIvY2FjaGVfbWFuYWdlci5weQ==) | `81.19% <100%> (ø)` | | ------ [Continue to review full report at Codecov](https://codecov.io/gh/pybel/pybel/pull/319?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/pybel/pybel/pull/319?src=pr&el=footer). Last update [45580a5...b2dc09f](https://codecov.io/gh/pybel/pybel/pull/319?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments). scolby33: I modified the script to allow alternate connection strings and ran some timing tests on other RDBMS': | Connection | Insert Time | Drop Time | |----|----|----| | `sqlite://` | 201.38 | 6.50 | | `postgresql+psycopg2://scott@localhost/pybeltest` | 400.33 | 34.51 | All times in seconds. 
No statistics were done, but similar times were found for three runs with each DB. With SQLite, I get a Python process at 99% CPU; with PostgreSQL, I get a Python process at ~45% and a Postgres process at about the same. Very interesting.
diff --git a/src/pybel/manager/cache_manager.py b/src/pybel/manager/cache_manager.py index c3f81dd5..b19921c7 100644 --- a/src/pybel/manager/cache_manager.py +++ b/src/pybel/manager/cache_manager.py @@ -27,7 +27,7 @@ from .exc import EdgeAddError from .lookup_manager import LookupManager from .models import ( Annotation, AnnotationEntry, Author, Citation, Edge, Evidence, Modification, Namespace, NamespaceEntry, Network, - Node, Property, + Node, Property, network_edge, network_node, edge_property, edge_annotation ) from .query_manager import QueryManager from .utils import extract_shared_optional, extract_shared_required, update_insert_values @@ -637,28 +637,55 @@ class NetworkManager(NamespaceManager, AnnotationManager): """ return self.session.query(exists().where(and_(Network.name == name, Network.version == version))).scalar() - @staticmethod - def iterate_singleton_edges_from_network(network): - """Gets all edges that only belong to the given network + def query_singleton_edges_from_network(self, network): + """Returns a query selecting all edge ids that only belong to the given network :type network: Network - :rtype: iter[Edge] + :rtype: sqlalchemy.orm.query.Query """ - return ( # TODO implement with nested SQLAlchemy query for better speed - edge - for edge in network.edges - if edge.networks.count() == 1 + ne1 = aliased(network_edge, name='ne1') + ne2 = aliased(network_edge, name='ne2') + singleton_edge_ids_for_network = ( + self.session.query(ne1.c.edge_id) + .outerjoin(ne2, and_( + ne1.c.edge_id == ne2.c.edge_id, + ne1.c.network_id != ne2.c.network_id + )) + .filter(and_( + ne1.c.network_id == network.id, + ne2.c.edge_id == None + )) ) + return singleton_edge_ids_for_network def drop_network(self, network): """Drops a network, while also cleaning up any edges that are no longer part of any network. 
:type network: Network """ - for edge in self.iterate_singleton_edges_from_network(network): - self.session.delete(edge) + # get the IDs of the edges that will be orphaned by deleting this network + # FIXME: this list could be a problem if it becomes very large; possible optimization is a temporary table in DB + edge_ids = [result.edge_id for result in self.query_singleton_edges_from_network(network)] + + # delete the network-to-node mappings for this network + self.session.query(network_node).filter(network_node.c.network_id == network.id).delete(synchronize_session=False) + + # delete the edge-to-property mappings for the to-be-orphaned edges + self.session.query(edge_property).filter(edge_property.c.edge_id.in_(edge_ids)).delete(synchronize_session=False) - self.session.delete(network) + # delete the edge-to-annotation mappings for the to-be-orphaned edges + self.session.query(edge_annotation).filter(edge_annotation.c.edge_id.in_(edge_ids)).delete(synchronize_session=False) + + # delete the edge-to-network mappings for this network + self.session.query(network_edge).filter(network_edge.c.network_id == network.id).delete(synchronize_session=False) + + # delete the now-orphaned edges + self.session.query(Edge).filter(Edge.id.in_(edge_ids)).delete(synchronize_session=False) + + # delete the network + self.session.query(Network).filter(Network.id == network.id).delete(synchronize_session=False) + + # commit it! self.session.commit() def drop_network_by_id(self, network_id): @@ -672,8 +699,7 @@ class NetworkManager(NamespaceManager, AnnotationManager): def drop_networks(self): """Drops all networks""" for network in self.session.query(Network).all(): - self.session.delete(network) - self.session.commit() + self.drop_network(network) def get_network_versions(self, name): """Returns all of the versions of a network with the given name
Improve efficiency of network drop Right now, it's so slow that the Selventa large corpus can't be dropped in a reasonable time. Need to get smarter about building queries that efficiently find edges that are in a give network (to be dropped) but not in any other ones, then drop them. Then, drop the network.
pybel/pybel
diff --git a/scripts/run_drop_test.py b/scripts/run_drop_test.py new file mode 100644 index 00000000..49584890 --- /dev/null +++ b/scripts/run_drop_test.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- + +"""Test loading and dropping a network.""" + +import os +import time +from contextlib import contextmanager + +import click + +import pybel + +DEFAULT_CONNECTION = 'mysql+mysqldb://root@localhost/pbt?charset=utf8' +PICKLE = 'small_corpus.bel.gpickle' +SMALL_CORPUS_URL = 'https://arty.scai.fraunhofer.de/artifactory/bel/knowledge/selventa-small-corpus/selventa-small-corpus-20150611.bel' + + +@contextmanager +def time_me(start_string): + """Wrap statements with time logging.""" + print(start_string) + parse_start_time = time.time() + yield + print(f'ran in {time.time() - parse_start_time:.2f} seconds') + + +def get_numbers(graph, connection=None): + manager = pybel.Manager.from_connection(connection if connection else DEFAULT_CONNECTION) + print('inserting') + parse_start_time = time.time() + network = manager.insert_graph(graph) + print(f'inserted in {time.time() - parse_start_time:.2f} seconds') + + print('dropping') + drop_start_time = time.time() + manager.drop_network(network) + drop_time = time.time() - drop_start_time + print(f'dropped in {drop_time:.2f} seconds') + + return drop_time + + [email protected]() [email protected]('--connection', help=f'SQLAlchemy connection. 
Defaults to {DEFAULT_CONNECTION}') +def main(connection): + """Parse a network, load it to the database, then test how fast it drops.""" + + if os.path.exists(PICKLE): + print(f'opening from {PICKLE}') + graph = pybel.from_pickle(PICKLE) + else: + with time_me(f'opening from {SMALL_CORPUS_URL}'): + manager = pybel.Manager.from_connection(connection if connection else DEFAULT_CONNECTION) + graph = pybel.from_url(SMALL_CORPUS_URL, manager=manager, use_tqdm=True, citation_clearing=False) + + pybel.to_pickle(graph, PICKLE) + + n = 1 + # FIXME this fails if you do it with the same manager + + times = [ + get_numbers(graph, connection) + for _ in range(n) + ] + + print(times) + print(sum(times) / n) + + +if __name__ == '__main__': + main() diff --git a/tests/test_manager/test_manager_drop.py b/tests/test_manager/test_manager_drop.py index 35b09973..c7f65e37 100644 --- a/tests/test_manager/test_manager_drop.py +++ b/tests/test_manager/test_manager_drop.py @@ -90,10 +90,10 @@ class TestCascades(TemporaryCacheMixin): self.assertEqual(0, self.g2.edges.count()) def test_get_orphan_edges(self): - edges = list(self.manager.iterate_singleton_edges_from_network(self.g1)) + edges = [result.edge_id for result in self.manager.query_singleton_edges_from_network(self.g1)] self.assertEqual(2, len(edges)) - self.assertIn(self.e2, edges) - self.assertIn(self.e3, edges) + self.assertIn(self.e2.id, edges) + self.assertIn(self.e3.id, edges) def test_drop_network_1(self): """When a network gets dropped, drop all of the edges if they don't appear in other networks""" @@ -113,6 +113,13 @@ class TestCascades(TemporaryCacheMixin): self.assertEqual(1, self.manager.count_networks()) self.assertEqual(3, self.g1.edges.count()) + def test_drop_all_networks(self): + """When all networks are dropped, make sure all the edges and network_edge mappings are gone too""" + self.manager.drop_networks() + + self.assertEqual(0, self.manager.count_edges()) + self.assertEqual(0, self.manager.count_networks()) + 
def test_drop_modification(self): """Don't let this happen"""
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
0.11
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==8.1.8 coverage==7.2.7 decorator==5.1.1 execnet==2.0.2 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core greenlet==3.1.1 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work networkx==1.11 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work -e git+https://github.com/pybel/pybel.git@499c96c87bd4e94854f509b83d221a357902c57d#egg=PyBEL pyparsing==3.1.4 pytest==7.1.2 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 requests==2.31.0 requests-file==2.1.0 six==1.17.0 SQLAlchemy==2.0.40 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work tqdm==4.67.1 typing_extensions==4.7.1 urllib3==2.0.7 zipp @ file:///croot/zipp_1672387121353/work
name: pybel channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==3.4.1 - click==8.1.8 - coverage==7.2.7 - decorator==5.1.1 - execnet==2.0.2 - greenlet==3.1.1 - idna==3.10 - networkx==1.11 - pyparsing==3.1.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - requests==2.31.0 - requests-file==2.1.0 - six==1.17.0 - sqlalchemy==2.0.40 - tqdm==4.67.1 - typing-extensions==4.7.1 - urllib3==2.0.7 prefix: /opt/conda/envs/pybel
[ "tests/test_manager/test_manager_drop.py::TestCascades::test_drop_all_networks", "tests/test_manager/test_manager_drop.py::TestCascades::test_get_orphan_edges" ]
[]
[ "tests/test_manager/test_manager_drop.py::TestReconstituteNodeTuples::test_simple", "tests/test_manager/test_manager_drop.py::TestCascades::test_drop_edge", "tests/test_manager/test_manager_drop.py::TestCascades::test_drop_modification", "tests/test_manager/test_manager_drop.py::TestCascades::test_drop_namespace", "tests/test_manager/test_manager_drop.py::TestCascades::test_drop_network_1", "tests/test_manager/test_manager_drop.py::TestCascades::test_drop_network_2", "tests/test_manager/test_manager_drop.py::TestCascades::test_drop_node", "tests/test_manager/test_manager_drop.py::TestCascades::test_drop_property" ]
[]
MIT License
2,808
[ "src/pybel/manager/cache_manager.py" ]
[ "src/pybel/manager/cache_manager.py" ]
sernst__cauldron-42
3ebb9fece61219af16f73fd67a1c0e7fc584f770
2018-07-22 13:34:14
3ebb9fece61219af16f73fd67a1c0e7fc584f770
diff --git a/cauldron/session/exposed.py b/cauldron/session/exposed.py index d2c7c17..dea7dc6 100644 --- a/cauldron/session/exposed.py +++ b/cauldron/session/exposed.py @@ -264,6 +264,21 @@ class ExposedStep(object): interceptor = self._step.report.stdout_interceptor interceptor.write_source('{}'.format(message)) + def render_to_console(self, message: str, **kwargs): + """ + Renders the specified message to the console using Jinja2 template + rendering with the kwargs as render variables. The message will also + be dedented prior to rendering in the same fashion as other Cauldron + template rendering actions. + + :param message: + Template string to be rendered. + :param kwargs: + Variables to be used in rendering the template. + """ + rendered = templating.render(message, **kwargs) + return self.write_to_console(rendered) + def render_stop_display(step: 'projects.ProjectStep', message: str): """Renders a stop action to the Cauldron display."""
Render to Console In addition to the `cd.step.write_to_console()` function, it would be nice to have a `cd.step.render_to_console()` that included Jinja2 templating and `textwrap.dedent()` internally to make complex console messages easier to write.
sernst/cauldron
diff --git a/cauldron/test/projects/test_exposed.py b/cauldron/test/projects/test_exposed.py index 88634b2..3f46bc4 100644 --- a/cauldron/test/projects/test_exposed.py +++ b/cauldron/test/projects/test_exposed.py @@ -245,6 +245,27 @@ class TestExposed(scaffolds.ResultsTest): args, kwargs = write_source.call_args self.assertEqual('{}'.format(message), args[0]) + @patch( + 'cauldron.session.exposed.ExposedStep._step', + new_callable=PropertyMock + ) + def test_render_to_console(self, _step: PropertyMock): + """ + Should render to the console using a write_source function + call on the internal step report's stdout_interceptor. + """ + message = ' {{ a }} is not {{ b }}.' + + _step_mock = MagicMock() + write_source = MagicMock() + _step_mock.report.stdout_interceptor.write_source = write_source + _step.return_value = _step_mock + step = exposed.ExposedStep() + step.render_to_console(message, a=7, b='happy') + + args, kwargs = write_source.call_args + self.assertEqual('7 is not happy.', args[0]) + @patch( 'cauldron.session.exposed.ExposedStep._step', new_callable=PropertyMock
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest-cov", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 beautifulsoup4==4.12.3 bokeh==2.3.3 -e git+https://github.com/sernst/cauldron.git@3ebb9fece61219af16f73fd67a1c0e7fc584f770#egg=cauldron_notebook certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 coverage==6.2 cycler==0.11.0 dataclasses==0.8 Flask==2.0.3 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 itsdangerous==2.0.1 Jinja2==3.0.3 kiwisolver==1.3.1 Markdown==3.3.7 MarkupSafe==2.0.1 matplotlib==3.3.4 numpy==1.19.5 packaging==21.3 pandas==1.1.5 Pillow==8.4.0 plotly==5.18.0 pluggy==1.0.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-runner==5.3.2 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 requests==2.27.1 scipy==1.5.4 seaborn==0.11.2 six==1.17.0 soupsieve==2.3.2.post1 tenacity==8.2.2 tomli==1.2.3 tornado==6.1 typing_extensions==4.1.1 urllib3==1.26.20 Werkzeug==2.0.3 zipp==3.6.0
name: cauldron channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - beautifulsoup4==4.12.3 - bokeh==2.3.3 - charset-normalizer==2.0.12 - click==8.0.4 - coverage==6.2 - cycler==0.11.0 - dataclasses==0.8 - flask==2.0.3 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - itsdangerous==2.0.1 - jinja2==3.0.3 - kiwisolver==1.3.1 - markdown==3.3.7 - markupsafe==2.0.1 - matplotlib==3.3.4 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pillow==8.4.0 - plotly==5.18.0 - pluggy==1.0.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-runner==5.3.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.27.1 - scipy==1.5.4 - seaborn==0.11.2 - six==1.17.0 - soupsieve==2.3.2.post1 - tenacity==8.2.2 - tomli==1.2.3 - tornado==6.1 - typing-extensions==4.1.1 - urllib3==1.26.20 - werkzeug==2.0.3 - zipp==3.6.0 prefix: /opt/conda/envs/cauldron
[ "cauldron/test/projects/test_exposed.py::TestExposed::test_render_to_console" ]
[]
[ "cauldron/test/projects/test_exposed.py::TestExposed::test_change_title", "cauldron/test/projects/test_exposed.py::TestExposed::test_get_internal_project", "cauldron/test/projects/test_exposed.py::TestExposed::test_get_internal_project_fail", "cauldron/test/projects/test_exposed.py::TestExposed::test_no_project_defaults", "cauldron/test/projects/test_exposed.py::TestExposed::test_no_step_defaults", "cauldron/test/projects/test_exposed.py::TestExposed::test_project_path", "cauldron/test/projects/test_exposed.py::TestExposed::test_project_stop_aborted", "cauldron/test/projects/test_exposed.py::TestExposed::test_render_stop_display", "cauldron/test/projects/test_exposed.py::TestExposed::test_render_stop_display_error", "cauldron/test/projects/test_exposed.py::TestExposed::test_step_properties", "cauldron/test/projects/test_exposed.py::TestExposed::test_step_stop_aborted", "cauldron/test/projects/test_exposed.py::TestExposed::test_step_visibility", "cauldron/test/projects/test_exposed.py::TestExposed::test_stop_project", "cauldron/test/projects/test_exposed.py::TestExposed::test_stop_step_and_halt", "cauldron/test/projects/test_exposed.py::TestExposed::test_stop_step_no_halt", "cauldron/test/projects/test_exposed.py::TestExposed::test_stop_step_silent", "cauldron/test/projects/test_exposed.py::TestExposed::test_write_to_console", "cauldron/test/projects/test_exposed.py::TestExposed::test_write_to_console_fail" ]
[]
MIT License
2,810
[ "cauldron/session/exposed.py" ]
[ "cauldron/session/exposed.py" ]
claws__aioprometheus-30
bc9975761a1b938e91ec1aa81120d8a78c911d21
2018-07-23 12:29:36
bc9975761a1b938e91ec1aa81120d8a78c911d21
diff --git a/README.rst b/README.rst index 66676fa..6d1b6e2 100644 --- a/README.rst +++ b/README.rst @@ -33,40 +33,39 @@ and exposed via a HTTP endpoint. .. code-block:: python #!/usr/bin/env python - ''' + """ This example demonstrates how a single Counter metric collector can be created and exposed via a HTTP endpoint. - ''' + """ import asyncio import socket from aioprometheus import Counter, Service - if __name__ == '__main__': + if __name__ == "__main__": - loop = asyncio.get_event_loop() - - svr = Service() + async def main(svr: Service) -> None: - events_counter = Counter( - "events", - "Number of events.", - const_labels={'host': socket.gethostname()}) + events_counter = Counter( + "events", "Number of events.", const_labels={"host": socket.gethostname()} + ) + svr.register(events_counter) + await svr.start(addr="127.0.0.1", port=5000) + print(f"Serving prometheus metrics on: {svr.metrics_url}") - svr.register(events_counter) + # Now start another coroutine to periodically update a metric to + # simulate the application making some progress. + async def updater(c: Counter): + while True: + c.inc({"kind": "timer_expiry"}) + await asyncio.sleep(1.0) - loop.run_until_complete(svr.start(addr="127.0.0.1")) - print(f'Serving prometheus metrics on: {svr.metrics_url}') - - async def updater(m: Counter): - # Periodically update the metric to simulate some progress - # happening in a real application. 
- while True: - m.inc({'kind': 'timer_expiry'}) - await asyncio.sleep(1.0) + await updater(events_counter) + loop = asyncio.get_event_loop() + svr = Service() try: - loop.run_until_complete(updater(events_counter)) + loop.run_until_complete(main(svr)) except KeyboardInterrupt: pass finally: @@ -96,7 +95,7 @@ The example script can be run using: (venv) $ cd examples (venv) $ python simple-example.py - Serving prometheus metrics on: http://127.0.0.1:50624/metrics + Serving prometheus metrics on: http://127.0.0.1:5000/metrics In another terminal fetch the metrics using the ``curl`` command line tool to verify they can be retrieved by Prometheus server. @@ -105,7 +104,7 @@ By default metrics will be returned in plan text format. .. code-block:: console - $ curl http://127.0.0.1:50624/metrics + $ curl http://127.0.0.1:5000/metrics # HELP events Number of events. # TYPE events counter events{host="alpha",kind="timer_expiry"} 33 @@ -115,7 +114,7 @@ to read on the command line. .. code-block:: console - $ curl http://127.0.0.1:50624/metrics -H "ACCEPT: application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited" + $ curl http://127.0.0.1:5000/metrics -H "ACCEPT: application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited" The metrics service also responds to requests sent to its ``/`` route. The response is simple HTML. This route can be useful as a Kubernetes ``/healthz`` @@ -124,15 +123,64 @@ to serialize a full metrics response. .. code-block:: console - $ curl http://127.0.0.1:50624/ + $ curl http://127.0.0.1:5000/ <html><body><a href='/metrics'>metrics</a></body></html> -A number of convenience decorator functions are also available to assist with -updating metrics. +The aioprometheus package provides a number of convenience decorator +functions that can assist with updating metrics. + +There ``examples`` directory contains many examples showing how to use the +aioprometheus package. 
The ``app-example.py`` file will likely be of interest +as it provides a more representative application example that the simple +example shown above. + +Examples in the ``examples/frameworks`` directory show how aioprometheus can +be used within existing aiohttp, quart and vibora applications instead of +creating a separate aioprometheus.Service endpoint to handle metrics. The +vibora example is shown below. + +.. code-block:: python + + #!/usr/bin/env python + """ + Sometimes you want to expose Prometheus metrics from within an existing web + service and don't want to start a separate Prometheus metrics server. + + This example uses the aioprometheus package to add Prometheus instrumentation + to a Vibora application. In this example a registry and a counter metric is + instantiated. A '/metrics' route is added to the application and the render + function from aioprometheus is called to format the metrics into the + appropriate format. + """ + + from aioprometheus import render, Counter, Registry + from vibora import Vibora, Request, Response + + + app = Vibora(__name__) + app.registry = Registry() + app.events_counter = Counter("events", "Number of events.") + app.registry.register(app.events_counter) + + + @app.route("/") + async def hello(request: Request): + app.events_counter.inc({"path": "/"}) + return Response(b"hello") + + + @app.route("/metrics") + async def handle_metrics(request: Request): + """ + Negotiate a response format by inspecting the ACCEPTS headers and selecting + the most efficient format. Render metrics in the registry into the chosen + format and return a response. + """ + content, http_headers = render(app.registry, [request.headers.get("accept")]) + return Response(content, headers=http_headers) + -There are more examples in the ``examples`` directory. The ``app-example.py`` -file will likely be of interest as it provides a more representative -application example. 
+ app.run() License diff --git a/docs/index.rst b/docs/index.rst index 919115b..92626ec 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -64,7 +64,7 @@ The example script can be run using: (venv) $ cd examples (venv) $ python simple-example.py - Serving prometheus metrics on: http://127.0.0.1:50624/metrics + Serving prometheus metrics on: http://127.0.0.1:5000/metrics In another terminal fetch the metrics using the ``curl`` command line tool to verify they can be retrieved by Prometheus server. @@ -73,12 +73,12 @@ By default metrics will be returned in plan text format. .. code-block:: console - $ curl http://127.0.0.1:50624/metrics + $ curl http://127.0.0.1:5000/metrics # HELP events Number of events. # TYPE events counter events{host="alpha",kind="timer_expiry"} 33 - $ curl http://127.0.0.1:50624/metrics -H 'Accept: text/plain; version=0.0.4' + $ curl http://127.0.0.1:5000/metrics -H 'Accept: text/plain; version=0.0.4' # HELP events Number of events. # TYPE events counter events{host="alpha",kind="timer_expiry"} 36 @@ -88,7 +88,7 @@ to read on the command line. .. code-block:: console - $ curl http://127.0.0.1:50624/metrics -H "ACCEPT: application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited" + $ curl http://127.0.0.1:5000/metrics -H "ACCEPT: application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited" The metrics service also responds to requests sent to its ``/`` route. The response is simple HTML. This route can be useful as a Kubernetes health @@ -97,15 +97,24 @@ serialize a full metrics response. .. code-block:: console - $ curl http://127.0.0.1:50624/ + $ curl http://127.0.0.1:5000/ <html><body><a href='/metrics'>metrics</a></body></html> -A number of convenience decorator functions are also available to assist with -updating metrics. +The aioprometheus package provides a number of convenience decorator +functions that can assist with updating metrics. 
-There are more examples in the ``examples`` directory. The ``app-example.py`` -file will likely be of interest as it provides a more representative -application example. +There ``examples`` directory contains many examples showing how to use the +aioprometheus package. The ``app-example.py`` file will likely be of interest +as it provides a more representative application example that the simple +example shown above. + +Examples in the ``examples/frameworks`` directory show how aioprometheus can +be used within existing aiohttp, quart and vibora applications instead of +creating a separate aioprometheus.Service endpoint to handle metrics. The +vibora example is shown below. + +.. literalinclude:: ../examples/frameworks/vibora-example.py + :language: python3 License diff --git a/docs/user/index.rst b/docs/user/index.rst index f9030f1..5ac57aa 100644 --- a/docs/user/index.rst +++ b/docs/user/index.rst @@ -222,7 +222,7 @@ The example script can be run using: (venv) $ cd examples (venv) $ python simple-example.py - Serving prometheus metrics on: http://127.0.0.1:50624/metrics + Serving prometheus metrics on: http://127.0.0.1:5000/metrics In another terminal fetch the metrics using the ``curl`` command line tool to verify they can be retrieved by Prometheus server. @@ -231,12 +231,12 @@ By default metrics will be returned in plan text format. .. code-block:: console - $ curl http://127.0.0.1:50624/metrics + $ curl http://127.0.0.1:5000/metrics # HELP events Number of events. # TYPE events counter events{host="alpha",kind="timer_expiry"} 33 - $ curl http://127.0.0.1:50624/metrics -H 'Accept: text/plain; version=0.0.4' + $ curl http://127.0.0.1:5000/metrics -H 'Accept: text/plain; version=0.0.4' # HELP events Number of events. # TYPE events counter events{host="alpha",kind="timer_expiry"} 36 @@ -246,7 +246,7 @@ to read on the command line. .. 
code-block:: console - $ curl http://127.0.0.1:50624/metrics -H "ACCEPT: application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited" + $ curl http://127.0.0.1:5000/metrics -H "ACCEPT: application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited" The metrics service also responds to requests sent to its ``/`` route. The response is simple HTML. This route can be useful as a Kubernetes ``/healthz`` @@ -255,7 +255,7 @@ to serialize a full metrics response. .. code-block:: console - $ curl http://127.0.0.1:50624/ + $ curl http://127.0.0.1:5000/ <html><body><a href='/metrics'>metrics</a></body></html> @@ -274,11 +274,22 @@ The example can be run using .. code-block:: console (env) $ python app-example.py - Serving prometheus metrics on: http://127.0.0.1:50624/metrics + Serving prometheus metrics on: http://127.0.0.1:5000/metrics You can use the ``curl`` command line tool to fetch metrics manually or use the helper script described in the next section. +Frameworks Example +++++++++++++++++++ + +The aioprometheus package can also be used within other web framework based +applications such as ``aiohttp``, ``quart`` and ``vibora`` applications. +This usage approach removes the need to create a separate server endpoint +to handle metrics. The vibora example is shown below. + +.. literalinclude:: ../../examples/frameworks/vibora-example.py + :language: python3 + Checking examples using helper script ------------------------------------- @@ -307,7 +318,7 @@ Example: .. code-block:: console - $ python metrics-fetcher.py --url=http://127.0.0.1:50624/metrics --format=text --interval=2.0 + $ python metrics-fetcher.py --url=http://127.0.0.1:5000/metrics --format=text --interval=2.0 Checking Example using Prometheus @@ -334,7 +345,7 @@ we can create a minimal configuration file to scrape the example application. 
scrape_timeout: 10s target_groups: - - targets: ['localhost:50624'] + - targets: ['localhost:5000'] labels: group: 'dev' diff --git a/examples/app-example.py b/examples/app-example.py index 230092e..e560758 100644 --- a/examples/app-example.py +++ b/examples/app-example.py @@ -22,13 +22,25 @@ from asyncio.base_events import BaseEventLoop class ExampleApp(object): """ An example application that demonstrates how ``aioprometheus`` can be - used within a Python async application. + integrated and used within a Python application built upon asyncio. + + This application attempts to simulate a long running distributed system + process, say a socket relay or some kind of message adapter. It is + intentionally not hosting an existing web service in the application. + + In this case the aioprometheus.Service object is used to provide a + new HTTP endpoint that can be used to expose Prometheus metrics on. + + If this application was a web service (i.e. already had an existing web + interface) then the aioprometheus.Service object could be used as before + to add another web interface or a different approach could be used that + provides a metrics handler function for use with the existing web service. """ def __init__( self, metrics_host="127.0.0.1", - metrics_port: int = 0, + metrics_port: int = 5000, loop: BaseEventLoop = None, ): diff --git a/examples/frameworks/aiohttp-example.py b/examples/frameworks/aiohttp-example.py new file mode 100644 index 0000000..5a468fc --- /dev/null +++ b/examples/frameworks/aiohttp-example.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +""" +Sometimes you want to expose Prometheus metrics from within an existing web +service and don't want to start a separate Prometheus metrics server. + +This example uses the aioprometheus package to add Prometheus instrumentation +to an aiohttp application. In this example a registry and a counter metric is +instantiated. 
A '/metrics' route is added to the application and the render +function from aioprometheus is called to format the metrics into the +appropriate format. +""" + +from aiohttp import web +from aiohttp.hdrs import ACCEPT +from aioprometheus import render, Counter, Registry + + +app = web.Application() +app.registry = Registry() +app.events_counter = Counter("events", "Number of events.") +app.registry.register(app.events_counter) + + +async def handle_root(request): + app.events_counter.inc({"path": "/"}) + text = "Hello aiohttp" + return web.Response(text=text) + + +async def handle_metrics(request): + content, http_headers = render(app.registry, request.headers.getall(ACCEPT, [])) + return web.Response(body=content, headers=http_headers) + + +app.add_routes([web.get("/", handle_root), web.get("/metrics", handle_metrics)]) + + +web.run_app(app) diff --git a/examples/frameworks/quart-example.py b/examples/frameworks/quart-example.py new file mode 100644 index 0000000..b45873c --- /dev/null +++ b/examples/frameworks/quart-example.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +""" +Sometimes you want to expose Prometheus metrics from within an existing web +service and don't want to start a separate Prometheus metrics server. + +This example uses the aioprometheus package to add Prometheus instrumentation +to a Quart application. In this example a registry and a counter metric is +instantiated. A '/metrics' route is added to the application and the render +function from aioprometheus is called to format the metrics into the +appropriate format. 
+""" + +from aioprometheus import render, Counter, Registry +from quart import Quart, request + + +app = Quart(__name__) +app.registry = Registry() +app.events_counter = Counter("events", "Number of events.") +app.registry.register(app.events_counter) + + [email protected]("/") +async def hello(): + app.events_counter.inc({"path": "/"}) + return "hello" + + [email protected]("/metrics") +async def handle_metrics(): + content, http_headers = render(app.registry, request.headers.getlist("accept")) + return content, http_headers + + +app.run() diff --git a/examples/frameworks/vibora-example.py b/examples/frameworks/vibora-example.py new file mode 100644 index 0000000..f42e5d0 --- /dev/null +++ b/examples/frameworks/vibora-example.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +""" +Sometimes you want to expose Prometheus metrics from within an existing web +service and don't want to start a separate Prometheus metrics server. + +This example uses the aioprometheus package to add Prometheus instrumentation +to a Vibora application. In this example a registry and a counter metric is +instantiated. A '/metrics' route is added to the application and the render +function from aioprometheus is called to format the metrics into the +appropriate format. +""" + +from aioprometheus import render, Counter, Registry +from vibora import Vibora, Request, Response + + +app = Vibora(__name__) +app.registry = Registry() +app.events_counter = Counter("events", "Number of events.") +app.registry.register(app.events_counter) + + [email protected]("/") +async def hello(request: Request): + app.events_counter.inc({"path": "/"}) + return Response(b"hello") + + [email protected]("/metrics") +async def handle_metrics(request: Request): + """ + Negotiate a response format by inspecting the ACCEPTS headers and selecting + the most efficient format. Render metrics in the registry into the chosen + format and return a response. 
+ """ + content, http_headers = render(app.registry, [request.headers.get("accept")]) + return Response(content, headers=http_headers) + + +app.run() diff --git a/examples/simple-example.py b/examples/simple-example.py index 83d66df..ed41712 100644 --- a/examples/simple-example.py +++ b/examples/simple-example.py @@ -1,16 +1,17 @@ #!/usr/bin/env python """ -This example demonstrates how a single Counter metric collector can be created -and exposed via a HTTP endpoint. +This example demonstrates how aioprometheus can be used to expose metrics on +a HTTP endpoint that is provided by the aioprometheus.Service object. .. code-block:: console (env) $ python simple-example.py - Serving prometheus metrics on: http://127.0.0.1:50624/metrics + Serving prometheus metrics on: http://127.0.0.1:5000/metrics In another terminal fetch the metrics using the ``curl`` command line tool to verify they can be retrieved by Prometheus server. """ + import asyncio import socket from aioprometheus import Counter, Service @@ -18,28 +19,28 @@ from aioprometheus import Counter, Service if __name__ == "__main__": - loop = asyncio.get_event_loop() - - svr = Service() + async def main(svr: Service) -> None: - events_counter = Counter( - "events", "Number of events.", const_labels={"host": socket.gethostname()} - ) + events_counter = Counter( + "events", "Number of events.", const_labels={"host": socket.gethostname()} + ) + svr.register(events_counter) + await svr.start(addr="127.0.0.1", port=5000) + print(f"Serving prometheus metrics on: {svr.metrics_url}") - svr.register(events_counter) + # Now start another coroutine to periodically update a metric to + # simulate the application making some progress. 
+ async def updater(c: Counter): + while True: + c.inc({"kind": "timer_expiry"}) + await asyncio.sleep(1.0) - loop.run_until_complete(svr.start(addr="127.0.0.1")) - print(f"Serving prometheus metrics on: {svr.metrics_url}") - - async def updater(m: Counter): - # Periodically update the metric to simulate some progress - # happening in a real application. - while True: - m.inc({"kind": "timer_expiry"}) - await asyncio.sleep(1.0) + await updater(events_counter) + loop = asyncio.get_event_loop() + svr = Service() try: - loop.run_until_complete(updater(events_counter)) + loop.run_until_complete(main(svr)) except KeyboardInterrupt: pass finally: diff --git a/requirements.txt b/requirements.txt index 93afa4e..408a37c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -aiohttp >= 3.2.1 +aiohttp >= 3.3.2 asynctest prometheus-metrics-proto >= 18.1.1 quantile-python >= 1.1 diff --git a/src/aioprometheus/__init__.py b/src/aioprometheus/__init__.py index 2a12977..acbe1bb 100644 --- a/src/aioprometheus/__init__.py +++ b/src/aioprometheus/__init__.py @@ -3,9 +3,10 @@ from .collectors import Collector, Counter, Gauge, Summary, Histogram from .decorators import count_exceptions, inprogress, timer from . import formats from . import pusher -from . 
import negotiator +from .negotiator import negotiate from .registry import Registry, CollectorRegistry from .service import Service +from .renderer import render -__version__ = "18.01.04" +__version__ = "18.7.1" diff --git a/src/aioprometheus/formats/__init__.py b/src/aioprometheus/formats/__init__.py index e521e6c..b4c191c 100644 --- a/src/aioprometheus/formats/__init__.py +++ b/src/aioprometheus/formats/__init__.py @@ -1,3 +1,3 @@ from .base import IFormatter -from .text import TextFormatter, TEXT_CONTENT_TYPE -from .binary import BinaryFormatter, BINARY_CONTENT_TYPE +from .text import TextFormatter, TEXT_CONTENT_TYPE, TEXT_ACCEPTS +from .binary import BinaryFormatter, BINARY_CONTENT_TYPE, BINARY_ACCEPTS diff --git a/src/aioprometheus/formats/binary.py b/src/aioprometheus/formats/binary.py index 4b447f3..b5ae25d 100644 --- a/src/aioprometheus/formats/binary.py +++ b/src/aioprometheus/formats/binary.py @@ -30,6 +30,7 @@ BINARY_CONTENT_TYPE = ( "proto=io.prometheus.client.MetricFamily; " "encoding=delimited" ) +BINARY_ACCEPTS = set(BINARY_CONTENT_TYPE.split("; ")) class BinaryFormatter(IFormatter): diff --git a/src/aioprometheus/formats/text.py b/src/aioprometheus/formats/text.py index 52cd93a..02e75d2 100644 --- a/src/aioprometheus/formats/text.py +++ b/src/aioprometheus/formats/text.py @@ -35,6 +35,7 @@ NEG_INF = float("-inf") TEXT_CONTENT_TYPE = "text/plain; version=0.0.4; charset=utf-8" +TEXT_ACCEPTS = set(TEXT_CONTENT_TYPE.split("; ")) class TextFormatter(IFormatter): diff --git a/src/aioprometheus/negotiator.py b/src/aioprometheus/negotiator.py index 2284873..de96926 100644 --- a/src/aioprometheus/negotiator.py +++ b/src/aioprometheus/negotiator.py @@ -2,7 +2,7 @@ import logging from . 
import formats -from typing import Callable, Set +from typing import Callable, Sequence, Set logger = logging.getLogger(__name__) @@ -11,29 +11,40 @@ logger = logging.getLogger(__name__) FormatterType = Callable[[bool], formats.IFormatter] -ProtobufAccepts = set(formats.BINARY_CONTENT_TYPE.split("; ")) -TextAccepts = set(formats.TEXT_CONTENT_TYPE.split("; ")) +ProtobufAccepts = formats.BINARY_ACCEPTS +TextAccepts = formats.TEXT_ACCEPTS -def negotiate(accepts: Set[str]) -> FormatterType: - """ Negotiate a response format by scanning through the ACCEPTS - header and selecting the most efficient format. +def negotiate(accepts_headers: Sequence[str]) -> FormatterType: + """ Negotiate a response format by scanning through a list of ACCEPTS + headers and selecting the most efficient format. The formatter returned by this function is used to render a response. - :param accepts: a set of ACCEPT headers fields extracted from a request. + :param accepts_headers: a list of ACCEPT headers fields extracted from a request. :returns: a formatter class to form up the response into the appropriate representation. 
""" - if not isinstance(accepts, set): - raise TypeError("Expected a set but got {}".format(type(accepts))) + accepts = parse_accepts(accepts_headers) formatter = formats.TextFormatter # type: FormatterType if ProtobufAccepts.issubset(accepts): formatter = formats.BinaryFormatter # type: ignore - logger.debug("negotiating %s resulted in choosing %s", accepts, formatter.__name__) + logger.debug(f"negotiating {accepts} resulted in choosing {formatter.__name__}") return formatter + + +def parse_accepts(accept_headers: Sequence[str]) -> Set[str]: + """ Return a sequence of accepts items in the request headers """ + accepts = set() # type: Set[str] + for accept_items in accept_headers: + if ";" in accept_items: + accept_items = [i.strip() for i in accept_items.split(";")] + else: + accept_items = [accept_items] + accepts.update(accept_items) + return accepts diff --git a/src/aioprometheus/renderer.py b/src/aioprometheus/renderer.py new file mode 100644 index 0000000..07b0246 --- /dev/null +++ b/src/aioprometheus/renderer.py @@ -0,0 +1,37 @@ + +from .formats import TextFormatter, BinaryFormatter +from .registry import Registry +from .negotiator import negotiate +from typing import Sequence, Tuple, Union + + +def render(registry: Registry, accepts_headers: Sequence[str]) -> Tuple[bytes, dict]: + """ Render the metrics in this registry to a specific format. + + The format chosen is determined by scanning through the ACCEPTS headers + and selecting the most efficient format. If no accepts headers + information is provided then Text format is used as the default. + + :param registry: A collector registry that contains the metrics to be + rendered into a specific format. + + :param accepts_headers: a list of ACCEPT headers fields extracted from a request. + + :returns: a 2-tuple where the first item is a bytes object that + represents the formatted metrics and the second item is a dict of + header fields that can be added to a HTTP response. 
+ """ + if not isinstance(registry, Registry): + raise Exception(f"registry must be a Registry, got: {type(registry)}") + + if not isinstance(accepts_headers, (set, list, tuple)): + raise Exception( + f"accepts_headers must be a sequence, got: {type(accepts_headers)}" + ) + + Formatter = negotiate(accepts_headers) + formatter = Formatter() # type: Union[TextFormatter, BinaryFormatter] + + http_headers = formatter.get_headers() + content = formatter.marshall(registry) + return content, http_headers diff --git a/src/aioprometheus/service.py b/src/aioprometheus/service.py index 67bcf04..29860d3 100644 --- a/src/aioprometheus/service.py +++ b/src/aioprometheus/service.py @@ -9,7 +9,7 @@ import aiohttp.web from aiohttp.hdrs import METH_GET as GET, ACCEPT -from .negotiator import negotiate +from .renderer import render from .registry import Registry, CollectorsType from typing import Optional, Set @@ -68,17 +68,11 @@ class Service(object): "No URL available, Prometheus metrics server is not running" ) - # IPv4 returns 2-tuple, IPv6 returns 4-tuple - host, port, *_ = self._site._server.sockets[0].getsockname() + # IPv4 address returns a 2-tuple, IPv6 returns a 4-tuple + host, port, *_ = self._runner.addresses[0] scheme = "http{}".format("s" if self._https else "") - url = "{scheme}://{host}:{port}".format( - scheme=scheme, - host=host if ":" not in host else "[{}]".format(host), - port=port, - ) - # - # TODO: replace the above with self._site.name when aiohttp issue - # #3018 is resolved. + host = host if ":" not in host else f"[{host}]" + url = f"{scheme}://{host}:{port}" return url @property @@ -107,7 +101,6 @@ class Service(object): port: int = 0, ssl: SSLContext = None, metrics_url: str = DEFAULT_METRICS_PATH, - discovery_agent=None, ) -> None: """ Start the prometheus metrics HTTP(S) server. @@ -125,9 +118,6 @@ class Service(object): :param metrics_url: The name of the endpoint route to expose prometheus metrics on. Defaults to '/metrics'. 
- :param discovery_agent: an agent that can register the metrics - service with a service discovery mechanism. - :raises: Exception if the server could not be started. """ logger.debug( @@ -159,19 +149,11 @@ class Service(object): logger.debug("Prometheus metrics server started on %s", self.metrics_url) - # register service with service discovery - if discovery_agent: - await discovery_agent.register(self) - - async def stop(self, wait_duration: float = 1.0, discovery_agent=None) -> None: + async def stop(self, wait_duration: float = 1.0) -> None: """ Stop the prometheus metrics HTTP(S) server. :param wait_duration: the number of seconds to wait for connections to finish. - - :param discovery_agent: an agent that can deregister the metrics - service from a service discovery mechanism. - """ logger.debug("Prometheus metrics server stopping") @@ -179,10 +161,6 @@ class Service(object): logger.warning("Prometheus metrics server is already stopped") return - # de-register service with service discovery - if discovery_agent: - await discovery_agent.deregister(self) - await self._runner.cleanup() self._site = None self._app = None @@ -213,19 +191,15 @@ class Service(object): The request is inspected and the most efficient response data format is chosen. 
""" - Formatter = negotiate(self.accepts(request)) - formatter = Formatter(False) - - resp = aiohttp.web.Response() - resp.headers.update(formatter.get_headers()) - resp.body = formatter.marshall(self.registry) - return resp + content, http_headers = render( + self.registry, request.headers.getall(ACCEPT, []) + ) + return aiohttp.web.Response(body=content, headers=http_headers) def accepts(self, request: aiohttp.web.Request) -> Set[str]: """ Return a sequence of accepts items in the request headers """ accepts = set() # type: Set[str] accept_headers = request.headers.getall(ACCEPT, []) - logger.debug("accept: {}".format(accept_headers)) for accept_items in accept_headers: if ";" in accept_items: accept_items = [i.strip() for i in accept_items.split(";")] @@ -240,11 +214,10 @@ class Service(object): Serves a trivial page with a link to the metrics. Use this if ever you need to point a health check at your the service. """ + metrics_url = request.app["metrics_url"] return aiohttp.web.Response( content_type="text/html", - text="<html><body><a href='{}'>metrics</a></body></html>".format( - request.app["metrics_url"] - ), + text=f"<html><body><a href='{metrics_url}'>metrics</a></body></html>", ) async def handle_robots(self, request: aiohttp.web.Request) -> aiohttp.web.Response:
metrics handler to existing server hello, would be nice to include some documentation on how to do something like: ``` from aiohttp import web from aioprometheus import Service prometheus_service = Service() async def handle(request): text = "Hello aiohttp" return web.Response(text=text) app = web.Application() app.router.add_get('/', handle) app.router.add_get('/metrics', prometheus_service.handle_metrics) web.run_app(app) ```
claws/aioprometheus
diff --git a/tests/test_aiohttp.py b/tests/test_aiohttp.py new file mode 100644 index 0000000..195752d --- /dev/null +++ b/tests/test_aiohttp.py @@ -0,0 +1,91 @@ +import asynctest +import aiohttp +import aiohttp.hdrs +import aiohttp.web +import unittest +import aioprometheus + + +class TestAiohttpRender(asynctest.TestCase): + """ + Test exposing Prometheus metrics from within a aiohttp existing web + service without starting a separate Prometheus metrics server. + """ + + async def test_render_in_aiohttp_app(self): + """ check render usage in aiohttp app """ + + app = aiohttp.web.Application() + app.registry = aioprometheus.Registry() + app.events_counter = aioprometheus.Counter("events", "Number of events.") + app.registry.register(app.events_counter) + + async def index(request): + app.events_counter.inc({"path": "/"}) + return aiohttp.web.Response(text="hello") + + async def handle_metrics(request): + content, http_headers = aioprometheus.render( + app.registry, request.headers.getall(aiohttp.hdrs.ACCEPT, []) + ) + return aiohttp.web.Response(body=content, headers=http_headers) + + app.add_routes( + [aiohttp.web.get("/", index), aiohttp.web.get("/metrics", handle_metrics)] + ) + + runner = aiohttp.web.AppRunner(app) + await runner.setup() + + site = aiohttp.web.TCPSite(runner, "127.0.0.1", 0, shutdown_timeout=1.0) + await site.start() + + # Fetch ephemeral port that was bound. 
+ # IPv4 address returns a 2-tuple, IPv6 returns a 4-tuple + host, port, *_ = runner.addresses[0] + host = host if ":" not in host else f"[{host}]" + url = f"http://{host}:{port}" + root_url = f"{url}/" + metrics_url = f"{url}/metrics" + + async with aiohttp.ClientSession() as session: + + # Access root to increment metric counter + async with session.get(root_url) as response: + self.assertEqual(response.status, 200) + + # Get default format + async with session.get( + metrics_url, headers={aiohttp.hdrs.ACCEPT: "*/*"} + ) as response: + self.assertEqual(response.status, 200) + self.assertIn( + aioprometheus.formats.TEXT_CONTENT_TYPE, + response.headers.get("content-type"), + ) + # content = await response.read() + + # Get text format + async with session.get( + metrics_url, headers={aiohttp.hdrs.ACCEPT: "text/plain;"} + ) as response: + self.assertEqual(response.status, 200) + self.assertIn( + aioprometheus.formats.TEXT_CONTENT_TYPE, + response.headers.get("content-type"), + ) + + # Get binary format + async with session.get( + metrics_url, + headers={ + aiohttp.hdrs.ACCEPT: aioprometheus.formats.BINARY_CONTENT_TYPE + }, + ) as response: + self.assertEqual(response.status, 200) + self.assertIn( + aioprometheus.formats.BINARY_CONTENT_TYPE, + response.headers.get("content-type"), + ) + + await runner.cleanup() diff --git a/tests/test_quart.py b/tests/test_quart.py new file mode 100644 index 0000000..4a85361 --- /dev/null +++ b/tests/test_quart.py @@ -0,0 +1,72 @@ +import asynctest +import unittest +import aioprometheus + +try: + from quart import Quart, request + + have_quart = True +except ImportError: + have_quart = False + + [email protected](have_quart, "Quart library is not available") +class TestQuartRender(asynctest.TestCase): + """ + Test exposing Prometheus metrics from within an Quart existing web + service without starting a separate Prometheus metrics server. 
+ """ + + async def test_render_in_quart_app(self): + """ check render usage in Quart app """ + + app = Quart(__name__) + app.registry = aioprometheus.Registry() + app.events_counter = aioprometheus.Counter("events", "Number of events.") + app.registry.register(app.events_counter) + + @app.route("/") + async def index(): + app.events_counter.inc({"path": "/"}) + return "hello" + + @app.route("/metrics") + async def handle_metrics(): + content, http_headers = aioprometheus.render( + app.registry, request.headers.getlist("accept") + ) + return content, http_headers + + # The test client also starts the web service + test_client = app.test_client() + + # Access root to increment metric counter + response = await test_client.get("/") + self.assertEqual(response.status_code, 200) + + # Get default format + response = await test_client.get("/metrics", headers={"accept": "*/*"}) + self.assertEqual(response.status_code, 200) + self.assertIn( + aioprometheus.formats.TEXT_CONTENT_TYPE, + response.headers.get("content-type"), + ) + # payload = await response.get_data() + + # Get text format + response = await test_client.get("/metrics", headers={"accept": "text/plain;"}) + self.assertEqual(response.status_code, 200) + self.assertIn( + aioprometheus.formats.TEXT_CONTENT_TYPE, + response.headers.get("content-type"), + ) + + # Get binary format + response = await test_client.get( + "/metrics", headers={"accept": aioprometheus.formats.BINARY_CONTENT_TYPE} + ) + self.assertEqual(response.status_code, 200) + self.assertIn( + aioprometheus.formats.BINARY_CONTENT_TYPE, + response.headers.get("content-type"), + ) diff --git a/tests/test_renderer.py b/tests/test_renderer.py new file mode 100644 index 0000000..a829018 --- /dev/null +++ b/tests/test_renderer.py @@ -0,0 +1,46 @@ +import asynctest +import aioprometheus + + +class TestRenderer(asynctest.TestCase): + async def test_invalid_registry(self): + """ check only valid registry can be provided """ + for invalid_registry in ["nope", 
dict(), list()]: + with self.assertRaises(Exception) as cm: + aioprometheus.render(invalid_registry, []) + self.assertIn("registry must be a Registry, got:", str(cm.exception)) + + async def test_invalid_accepts_headers(self): + """ check only valid accepts_headers types can be provided """ + registry = aioprometheus.Registry() + for accepts_headers in ["nope", None, 42, dict()]: + with self.assertRaises(Exception) as cm: + aioprometheus.render(registry, accepts_headers) + self.assertIn("accepts_headers must be a sequence, got:", str(cm.exception)) + + async def test_render_default(self): + """ check metrics can be rendered using default format """ + accepts_headers = ("application/json", "*/*", "application/nothing") + registry = aioprometheus.Registry() + content, http_headers = aioprometheus.render(registry, accepts_headers) + self.assertEqual( + http_headers["Content-Type"], aioprometheus.formats.TEXT_CONTENT_TYPE + ) + + async def test_render_text(self): + """ check metrics can be rendered using text format """ + accepts_headers = ("text/plain;",) + registry = aioprometheus.Registry() + content, http_headers = aioprometheus.render(registry, accepts_headers) + self.assertEqual( + http_headers["Content-Type"], aioprometheus.formats.TEXT_CONTENT_TYPE + ) + + async def test_render_binary(self): + """ check metrics can be rendered using binary format """ + accepts_headers = (aioprometheus.formats.BINARY_CONTENT_TYPE,) + registry = aioprometheus.Registry() + content, http_headers = aioprometheus.render(registry, accepts_headers) + self.assertEqual( + http_headers["Content-Type"], aioprometheus.formats.BINARY_CONTENT_TYPE + ) diff --git a/tests/test_vibora.py b/tests/test_vibora.py new file mode 100644 index 0000000..0fad923 --- /dev/null +++ b/tests/test_vibora.py @@ -0,0 +1,81 @@ +import asynctest +import unittest +import aioprometheus + +try: + from vibora import Vibora, Request, Response + + have_vibora = True +except ImportError: + have_vibora = False + + [email 
protected](have_vibora, "Vibora library is not available") +class TestViboraRender(asynctest.TestCase): + """ + Test exposing Prometheus metrics from within an Vibora existing web + service without starting a separate Prometheus metrics server. + """ + + async def test_render_in_vibora_app(self): + """ check render usage in Vibora app """ + + app = Vibora(__name__) + app.registry = aioprometheus.Registry() + app.events_counter = aioprometheus.Counter("events", "Number of events.") + app.registry.register(app.events_counter) + + @app.route("/") + async def index(request: Request): + app.events_counter.inc({"path": "/"}) + return Response(b"hello") + + @app.route("/metrics") + async def handle_metrics(request: Request): + """ + Negotiate a response format by inspecting the ACCEPTS headers and selecting + the most efficient format. Render metrics in the registry into the chosen + format and return a response. + """ + content, http_headers = aioprometheus.render( + app.registry, [request.headers.get("accept")] + ) + return Response(content, headers=http_headers) + + # NOTE: Vibora client.get HTTP headers handling seem to expect case-sensitive. + # Must use Accept and not accept or ACCEPT! Where as response handling of + # requests doesn't seem to care. + # Until Vibora #139 is resolved we must use "Accept". 
+ + # The test client also starts the web service + client = app.test_client() + + # Access root to increment metric counter + response = await client.get("/") + self.assertEqual(response.status_code, 200) + + # Get default format + response = await client.get("/metrics", headers={"Accept": "*/*"}) + self.assertEqual(response.status_code, 200) + self.assertIn( + aioprometheus.formats.TEXT_CONTENT_TYPE, + [response.headers.get("Content-Type")], + ) + + # Get text format + response = await client.get("/metrics", headers={"Accept": "text/plain;"}) + self.assertEqual(response.status_code, 200) + self.assertIn( + aioprometheus.formats.TEXT_CONTENT_TYPE, + [response.headers.get("content-type")], + ) + + # # Get binary format + response = await client.get( + "/metrics", headers={"Accept": aioprometheus.formats.BINARY_CONTENT_TYPE} + ) + self.assertEqual(response.status_code, 200) + self.assertIn( + aioprometheus.formats.BINARY_CONTENT_TYPE, + [response.headers.get("content-type")], + )
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 12 }
18.01
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-asyncio", "black", "sphinx", "coverage", "mypy", "twine", "wheel" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiohttp==3.8.6 -e git+https://github.com/claws/aioprometheus.git@bc9975761a1b938e91ec1aa81120d8a78c911d21#egg=aioprometheus aiosignal==1.2.0 alabaster==0.7.13 async-timeout==4.0.2 asynctest==0.13.0 attrs==22.2.0 Babel==2.11.0 black==22.8.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 click==8.0.4 colorama==0.4.5 coverage==6.2 cryptography==40.0.2 dataclasses==0.8 docutils==0.18.1 frozenlist==1.2.0 idna==3.10 idna-ssl==1.1.0 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jeepney==0.7.1 Jinja2==3.0.3 keyring==23.4.1 MarkupSafe==2.0.1 multidict==5.2.0 mypy==0.971 mypy-extensions==1.0.0 packaging==21.3 pathspec==0.9.0 pkginfo==1.10.0 platformdirs==2.4.0 pluggy==1.0.0 prometheus-metrics-proto==18.1.2 protobuf==3.19.6 py==1.11.0 pycparser==2.21 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-asyncio==0.16.0 pytz==2025.2 quantile-python==1.1 readme-renderer==34.0 requests==2.27.1 requests-toolbelt==1.0.0 rfc3986==1.5.0 SecretStorage==3.3.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 tqdm==4.64.1 twine==3.8.0 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 webencodings==0.5.1 yarl==1.7.2 zipp==3.6.0
name: aioprometheus channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiohttp==3.8.6 - aiosignal==1.2.0 - alabaster==0.7.13 - async-timeout==4.0.2 - asynctest==0.13.0 - attrs==22.2.0 - babel==2.11.0 - black==22.8.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - click==8.0.4 - colorama==0.4.5 - coverage==6.2 - cryptography==40.0.2 - dataclasses==0.8 - docutils==0.18.1 - frozenlist==1.2.0 - idna==3.10 - idna-ssl==1.1.0 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jeepney==0.7.1 - jinja2==3.0.3 - keyring==23.4.1 - markupsafe==2.0.1 - multidict==5.2.0 - mypy==0.971 - mypy-extensions==1.0.0 - packaging==21.3 - pathspec==0.9.0 - pkginfo==1.10.0 - platformdirs==2.4.0 - pluggy==1.0.0 - prometheus-metrics-proto==18.1.2 - protobuf==3.19.6 - py==1.11.0 - pycparser==2.21 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytz==2025.2 - quantile-python==1.1 - readme-renderer==34.0 - requests==2.27.1 - requests-toolbelt==1.0.0 - rfc3986==1.5.0 - secretstorage==3.3.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - tqdm==4.64.1 - 
twine==3.8.0 - typed-ast==1.5.5 - typing-extensions==4.1.1 - urllib3==1.26.20 - webencodings==0.5.1 - yarl==1.7.2 - zipp==3.6.0 prefix: /opt/conda/envs/aioprometheus
[ "tests/test_aiohttp.py::TestAiohttpRender::test_render_in_aiohttp_app", "tests/test_renderer.py::TestRenderer::test_invalid_accepts_headers", "tests/test_renderer.py::TestRenderer::test_invalid_registry", "tests/test_renderer.py::TestRenderer::test_render_binary", "tests/test_renderer.py::TestRenderer::test_render_default", "tests/test_renderer.py::TestRenderer::test_render_text" ]
[]
[]
[]
null
2,811
[ "README.rst", "examples/simple-example.py", "examples/frameworks/aiohttp-example.py", "examples/frameworks/vibora-example.py", "docs/user/index.rst", "src/aioprometheus/__init__.py", "src/aioprometheus/formats/text.py", "src/aioprometheus/service.py", "examples/app-example.py", "src/aioprometheus/renderer.py", "src/aioprometheus/formats/binary.py", "src/aioprometheus/negotiator.py", "examples/frameworks/quart-example.py", "docs/index.rst", "requirements.txt", "src/aioprometheus/formats/__init__.py" ]
[ "README.rst", "examples/simple-example.py", "examples/frameworks/aiohttp-example.py", "examples/frameworks/vibora-example.py", "docs/user/index.rst", "src/aioprometheus/__init__.py", "src/aioprometheus/formats/text.py", "src/aioprometheus/service.py", "examples/app-example.py", "src/aioprometheus/renderer.py", "src/aioprometheus/formats/binary.py", "src/aioprometheus/negotiator.py", "examples/frameworks/quart-example.py", "docs/index.rst", "requirements.txt", "src/aioprometheus/formats/__init__.py" ]
cloudinary__pycloudinary-157
bce0e855fef7510397999075654d7c3f059bfee0
2018-07-23 12:48:47
374cb5773d29fc01b6af5283428dc8651aa6c50d
diff --git a/cloudinary/uploader.py b/cloudinary/uploader.py index 0f58884..66342e5 100644 --- a/cloudinary/uploader.py +++ b/cloudinary/uploader.py @@ -1,14 +1,12 @@ # Copyright Cloudinary import json -import re import socket -from os.path import getsize +import certifi from six import string_types from urllib3 import PoolManager from urllib3.exceptions import HTTPError -import certifi import cloudinary from cloudinary import utils from cloudinary.api import Error @@ -34,6 +32,8 @@ else: ca_certs=certifi.where() ) +UPLOAD_LARGE_CHUNK_SIZE = 20000000 + def upload(file, **options): params = utils.build_upload_params(**options) @@ -64,32 +64,44 @@ def upload_large(file, **options): if utils.is_remote_url(file): return upload(file, **options) - upload_id = utils.random_public_id() - with open(file, 'rb') as file_io: - results = None + if hasattr(file, 'read') and callable(file.read): + file_io = file + else: + file_io = open(file, 'rb') + + upload_result = None + + with file_io: + upload_id = utils.random_public_id() current_loc = 0 - chunk_size = options.get("chunk_size", 20000000) - file_size = getsize(file) + chunk_size = options.get("chunk_size", UPLOAD_LARGE_CHUNK_SIZE) + file_size = utils.file_io_size(file_io) + + file_name = file.name if hasattr(file, 'name') and isinstance(file.name, str) else "stream" + chunk = file_io.read(chunk_size) + while chunk: - range = "bytes {0}-{1}/{2}".format(current_loc, current_loc + len(chunk) - 1, file_size) + content_range = "bytes {0}-{1}/{2}".format(current_loc, current_loc + len(chunk) - 1, file_size) current_loc += len(chunk) + http_headers = {"Content-Range": content_range, "X-Unique-Upload-Id": upload_id} + + upload_result = upload_large_part((file_name, chunk), http_headers=http_headers, **options) + + options["public_id"] = upload_result.get("public_id") - results = upload_large_part( - (file, chunk), - http_headers={"Content-Range": range, - "X-Unique-Upload-Id": upload_id}, - **options) - options["public_id"] = 
results.get("public_id") chunk = file_io.read(chunk_size) - return results + + return upload_result def upload_large_part(file, **options): """ Upload large files. """ params = utils.build_upload_params(**options) + if 'resource_type' not in options: options['resource_type'] = "raw" + return call_api("upload", params, file=file, **options) diff --git a/cloudinary/utils.py b/cloudinary/utils.py index a112aa6..9a6fd1f 100644 --- a/cloudinary/utils.py +++ b/cloudinary/utils.py @@ -3,6 +3,7 @@ import base64 import copy import hashlib import json +import os import random import re import string @@ -988,3 +989,19 @@ def __json_serializer(obj): def is_remote_url(file): """Basic URL scheme check to define if it's remote URL""" return isinstance(file, string_types) and re.match(REMOTE_URL_RE, file) + + +def file_io_size(file_io): + """ + Helper function for getting file-like object size(suitable for both files and streams) + + :param file_io: io.IOBase + + :return: size + """ + initial_position = file_io.tell() + file_io.seek(0, os.SEEK_END) + size = file_io.tell() + file_io.seek(initial_position, os.SEEK_SET) + + return size
Uploading large files from a stream Hello, I've been using cloudinary for quite some time and it's been great. Thanks. Now, I need to be able to upload large files (generated reports) that might go beyond 100 MB. According to docs, I need to use the [upload_large](http://cloudinary.com/documentation/upload_images#chunked_image_upload). My issue is that I have a stringio object and I couldn't use the upload_large function to upload it to cloudinary (stringio object worked fine with cloudinary.uploader.upload). I'm getting this: `TypeError: coercing to Unicode: need string or buffer, instance found` Looking at the [implementation of upload_large](https://github.com/cloudinary/pycloudinary/blob/master/cloudinary/uploader.py#L60), I see that it's invoking `open` on the file argument which means it expects a filename/path of a file on the file system which is inconsistent compared to cloudinary.uploader.upload. So, I want to ask: - Is there a way to use a stream (maybe sth other than stringio) with `upload_large` ? - Also, I want to check if there is a constraint that needs the "large" files to be on the file system. Of course I have no file size issue as the paid cloudinary plan is customized to support uploading up to 200MB per file. Thank you.
cloudinary/pycloudinary
diff --git a/test/helper_test.py b/test/helper_test.py index 19c7060..4cb704e 100644 --- a/test/helper_test.py +++ b/test/helper_test.py @@ -1,6 +1,5 @@ import os import random - import re from datetime import timedelta, tzinfo @@ -8,6 +7,8 @@ import six from urllib3 import HTTPResponse from urllib3._collections import HTTPHeaderDict +from cloudinary import utils + SUFFIX = os.environ.get('TRAVIS_JOB_ID') or random.randint(10000, 99999) REMOTE_TEST_IMAGE = "http://cloudinary.com/images/old_logo.png" @@ -92,3 +93,21 @@ def api_response_mock(): def uploader_response_mock(): return http_response_mock('{"foo":"bar"}') + + +def populate_large_file(file_io, size, chunk_size=4096): + file_io.write(b"BMJ\xB9Y\x00\x00\x00\x00\x00\x8A\x00\x00\x00|\x00\x00\x00x\x05\x00\x00x\x05\x00\x00\x01\ +\x00\x18\x00\x00\x00\x00\x00\xC0\xB8Y\x00a\x0F\x00\x00a\x0F\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\x00\ +\x00\xFF\x00\x00\xFF\x00\x00\x00\x00\x00\x00\xFFBGRs\x00\x00\x00\x00\x00\x00\x00\x00T\xB8\x1E\xFC\x00\x00\x00\x00\ +\x00\x00\x00\x00fff\xFC\x00\x00\x00\x00\x00\x00\x00\x00\xC4\xF5(\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") + + remaining_size = size - utils.file_io_size(file_io) + + while remaining_size > 0: + curr_chunk_size = min(remaining_size, chunk_size) + file_io.write(b"\xFF" * curr_chunk_size) + remaining_size -= chunk_size + + file_io.flush() + file_io.seek(0) diff --git a/test/test_uploader.py b/test/test_uploader.py index a897d9a..e03b5b9 100644 --- a/test/test_uploader.py +++ b/test/test_uploader.py @@ -7,13 +7,13 @@ from datetime import datetime import six from mock import patch -import cloudinary -from cloudinary import api, uploader, utils - from urllib3 import disable_warnings from urllib3.util import parse_url + +import cloudinary +from cloudinary import api, uploader, utils from test.helper_test import uploader_response_mock, SUFFIX, TEST_IMAGE, get_params, TEST_ICON, TEST_DOC, 
\ - REMOTE_TEST_IMAGE, UTC + REMOTE_TEST_IMAGE, UTC, populate_large_file MOCK_RESPONSE = uploader_response_mock() @@ -37,6 +37,11 @@ TEST_ID2 = "uploader_test_{}2".format(SUFFIX) disable_warnings() +LARGE_FILE_SIZE = 5880138 +LARGE_CHUNK_SIZE = 5243000 +LARGE_FILE_WIDTH = 1400 +LARGE_FILE_HEIGHT = 1400 + class UploaderTest(unittest.TestCase): @@ -72,11 +77,12 @@ class UploaderTest(unittest.TestCase): @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_upload_file_io_without_filename(self): """should successfully upload FileIO file """ - with io.BytesIO() as temp_file, \ - open(TEST_IMAGE, 'rb') as input_file: + with io.BytesIO() as temp_file, open(TEST_IMAGE, 'rb') as input_file: temp_file.write(input_file.read()) temp_file.seek(0) + result = uploader.upload(temp_file, tags=[UNIQUE_TAG]) + self.assertEqual(result["width"], TEST_IMAGE_WIDTH) self.assertEqual(result["height"], TEST_IMAGE_HEIGHT) self.assertEqual('stream', result["original_filename"]) @@ -325,44 +331,57 @@ P9/AFGGFyjOXZtQAAAAAElFTkSuQmCC\ @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_upload_large(self): """ should support uploading large files """ - temp_file = tempfile.NamedTemporaryFile() - temp_file_name = temp_file.name - temp_file.write(b"BMJ\xB9Y\x00\x00\x00\x00\x00\x8A\x00\x00\x00|\x00\x00\x00x\x05\x00\x00x\x05\x00\x00\x01\ -\x00\x18\x00\x00\x00\x00\x00\xC0\xB8Y\x00a\x0F\x00\x00a\x0F\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\x00\ -\x00\xFF\x00\x00\xFF\x00\x00\x00\x00\x00\x00\xFFBGRs\x00\x00\x00\x00\x00\x00\x00\x00T\xB8\x1E\xFC\x00\x00\x00\x00\ -\x00\x00\x00\x00fff\xFC\x00\x00\x00\x00\x00\x00\x00\x00\xC4\xF5(\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ -\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") - for i in range(0, 588000): - temp_file.write(b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF") - - temp_file.flush() - self.assertEqual(5880138, os.path.getsize(temp_file_name)) 
- - resource = uploader.upload_large(temp_file_name, chunk_size=5243000, tags=["upload_large_tag", UNIQUE_TAG]) - self.assertEqual(resource["tags"], ["upload_large_tag", UNIQUE_TAG]) - self.assertEqual(resource["resource_type"], "raw") - - resource2 = uploader.upload_large(temp_file_name, chunk_size=5243000, tags=["upload_large_tag", UNIQUE_TAG], - resource_type="image") - self.assertEqual(resource2["tags"], ["upload_large_tag", UNIQUE_TAG]) - self.assertEqual(resource2["resource_type"], "image") - self.assertEqual(resource2["width"], 1400) - self.assertEqual(resource2["height"], 1400) - - resource3 = uploader.upload_large(temp_file_name, chunk_size=5880138, tags=["upload_large_tag", UNIQUE_TAG]) - self.assertEqual(resource3["tags"], ["upload_large_tag", UNIQUE_TAG]) - self.assertEqual(resource3["resource_type"], "raw") - - # should allow fallback of upload large with remote url to regular upload + with tempfile.NamedTemporaryFile() as temp_file: + populate_large_file(temp_file, LARGE_FILE_SIZE) + temp_file_name = temp_file.name + + self.assertEqual(LARGE_FILE_SIZE, os.path.getsize(temp_file_name)) + + resource = uploader.upload_large(temp_file_name, chunk_size=LARGE_CHUNK_SIZE, + tags=["upload_large_tag", UNIQUE_TAG]) + + self.assertEqual(resource["tags"], ["upload_large_tag", UNIQUE_TAG]) + self.assertEqual(resource["resource_type"], "raw") + + resource2 = uploader.upload_large(temp_file_name, chunk_size=LARGE_CHUNK_SIZE, + tags=["upload_large_tag", UNIQUE_TAG], resource_type="image") + + self.assertEqual(resource2["tags"], ["upload_large_tag", UNIQUE_TAG]) + self.assertEqual(resource2["resource_type"], "image") + self.assertEqual(resource2["width"], LARGE_FILE_WIDTH) + self.assertEqual(resource2["height"], LARGE_FILE_HEIGHT) + + resource3 = uploader.upload_large(temp_file_name, chunk_size=LARGE_FILE_SIZE, + tags=["upload_large_tag", UNIQUE_TAG]) + + self.assertEqual(resource3["tags"], ["upload_large_tag", UNIQUE_TAG]) + 
self.assertEqual(resource3["resource_type"], "raw") + + @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") + def test_upload_large_url(self): + """Should allow fallback of upload large with remote url to regular upload""" resource4 = uploader.upload_large(REMOTE_TEST_IMAGE, tags=[UNIQUE_TAG]) + self.assertEqual(resource4["width"], TEST_IMAGE_WIDTH) self.assertEqual(resource4["height"], TEST_IMAGE_HEIGHT) - expected_signature = utils.api_sign_request(dict(public_id=resource4["public_id"], - version=resource4["version"]), - cloudinary.config().api_secret) + + expected_signature = utils.api_sign_request( + dict(public_id=resource4["public_id"], version=resource4["version"]), cloudinary.config().api_secret) + self.assertEqual(resource4["signature"], expected_signature) - temp_file.close() + @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") + def test_upload_large_file_io(self): + """Should support uploading large streams""" + with io.BytesIO() as temp_file: + populate_large_file(temp_file, LARGE_FILE_SIZE) + resource = uploader.upload_large(temp_file, chunk_size=LARGE_CHUNK_SIZE, + tags=["upload_large_tag", UNIQUE_TAG], resource_type="image") + + self.assertEqual(resource["tags"], ["upload_large_tag", UNIQUE_TAG]) + self.assertEqual(resource["resource_type"], "image") + self.assertEqual(resource["width"], LARGE_FILE_WIDTH) + self.assertEqual(resource["height"], LARGE_FILE_HEIGHT) @unittest.skipUnless(cloudinary.config().api_secret, "requires api_key/api_secret") def test_upload_preset(self): diff --git a/test/test_utils.py b/test/test_utils.py index ccb82fc..d5537f0 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -1,10 +1,12 @@ # -*- coding: utf-8 -*- - +import io import re +import tempfile import unittest from collections import OrderedDict from datetime import datetime, date from fractions import Fraction +from os.path import getsize import six from mock import patch @@ -13,7 +15,6 @@ 
import cloudinary.utils from cloudinary.utils import build_list_of_dicts, json_encode, encode_unicode_url from test.helper_test import TEST_IMAGE, REMOTE_TEST_IMAGE - DEFAULT_ROOT_PATH = 'http://res.cloudinary.com/test123/' DEFAULT_UPLOAD_PATH = 'http://res.cloudinary.com/test123/image/upload/' VIDEO_UPLOAD_PATH = 'http://res.cloudinary.com/test123/video/upload/' @@ -824,6 +825,32 @@ class TestUtils(unittest.TestCase): self.assertFalse(cloudinary.utils.is_remote_url(TEST_IMAGE)) self.assertTrue(cloudinary.utils.is_remote_url(REMOTE_TEST_IMAGE)) + def test_file_io_size(self): + """Should return correct file size""" + test_data = b"Test data" + test_data_len = len(test_data) + + with tempfile.NamedTemporaryFile() as temp_file: + temp_file.write(test_data) + + actual_size = cloudinary.utils.file_io_size(temp_file) + + filesystem_size = getsize(temp_file.name) + + self.assertEqual(test_data_len, filesystem_size) + self.assertEqual(test_data_len, actual_size) + + with io.BytesIO() as temp_stream: + temp_stream.write(test_data) + + actual_size = cloudinary.utils.file_io_size(temp_stream) + + self.assertEqual(test_data_len, actual_size) + + with tempfile.NamedTemporaryFile() as empty_file: + actual_size = cloudinary.utils.file_io_size(empty_file) + self.assertEqual(0, actual_size) + if __name__ == '__main__': unittest.main()
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
1.11
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "tox", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 -e git+https://github.com/cloudinary/pycloudinary.git@bce0e855fef7510397999075654d7c3f059bfee0#egg=cloudinary distlib==0.3.9 filelock==3.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mock==5.2.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work platformdirs==2.4.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tox==3.28.0 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 virtualenv==20.17.1 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: pycloudinary channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - distlib==0.3.9 - filelock==3.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - mock==5.2.0 - platformdirs==2.4.0 - six==1.17.0 - tox==3.28.0 - urllib3==1.26.20 - virtualenv==20.17.1 prefix: /opt/conda/envs/pycloudinary
[ "test/test_utils.py::TestUtils::test_file_io_size" ]
[]
[ "test/test_utils.py::TestUtils::test_angle", "test/test_utils.py::TestUtils::test_array_should_define_a_set_of_variables", "test/test_utils.py::TestUtils::test_aspect_ratio", "test/test_utils.py::TestUtils::test_audio_codec", "test/test_utils.py::TestUtils::test_audio_frequency", "test/test_utils.py::TestUtils::test_background", "test/test_utils.py::TestUtils::test_base_transformation_array", "test/test_utils.py::TestUtils::test_base_transformations", "test/test_utils.py::TestUtils::test_bit_rate", "test/test_utils.py::TestUtils::test_border", "test/test_utils.py::TestUtils::test_build_list_of_dicts", "test/test_utils.py::TestUtils::test_cloud_name", "test/test_utils.py::TestUtils::test_cloud_name_options", "test/test_utils.py::TestUtils::test_cname", "test/test_utils.py::TestUtils::test_cname_subdomain", "test/test_utils.py::TestUtils::test_crop", "test/test_utils.py::TestUtils::test_default_image", "test/test_utils.py::TestUtils::test_density", "test/test_utils.py::TestUtils::test_disallow_url_suffix_in_non_upload_types", "test/test_utils.py::TestUtils::test_disallow_url_suffix_with_slash_or_dot", "test/test_utils.py::TestUtils::test_disallow_use_root_path_if_not_image_upload", "test/test_utils.py::TestUtils::test_dollar_key_should_define_a_variable", "test/test_utils.py::TestUtils::test_dpr", "test/test_utils.py::TestUtils::test_duration", "test/test_utils.py::TestUtils::test_effect", "test/test_utils.py::TestUtils::test_effect_with_array", "test/test_utils.py::TestUtils::test_effect_with_dict", "test/test_utils.py::TestUtils::test_encode_context", "test/test_utils.py::TestUtils::test_encode_unicode_url", "test/test_utils.py::TestUtils::test_end_offset", "test/test_utils.py::TestUtils::test_escape_public_id", "test/test_utils.py::TestUtils::test_escape_public_id_with_non_ascii_characters", "test/test_utils.py::TestUtils::test_fetch", "test/test_utils.py::TestUtils::test_fetch_format", "test/test_utils.py::TestUtils::test_fetch_overlay", 
"test/test_utils.py::TestUtils::test_flags", "test/test_utils.py::TestUtils::test_folder_version", "test/test_utils.py::TestUtils::test_format", "test/test_utils.py::TestUtils::test_html_width_height_on_angle", "test/test_utils.py::TestUtils::test_html_width_height_on_crop_fit_limit", "test/test_utils.py::TestUtils::test_http_escape", "test/test_utils.py::TestUtils::test_http_private_cdn", "test/test_utils.py::TestUtils::test_ignore_http", "test/test_utils.py::TestUtils::test_is_remote_url", "test/test_utils.py::TestUtils::test_json_encode", "test/test_utils.py::TestUtils::test_keyframe_interval", "test/test_utils.py::TestUtils::test_merge", "test/test_utils.py::TestUtils::test_no_empty_transformation", "test/test_utils.py::TestUtils::test_norm_auto_range_value", "test/test_utils.py::TestUtils::test_norm_range_value", "test/test_utils.py::TestUtils::test_not_sign_the_url_suffix", "test/test_utils.py::TestUtils::test_offset", "test/test_utils.py::TestUtils::test_original_width_and_height", "test/test_utils.py::TestUtils::test_overlay", "test/test_utils.py::TestUtils::test_overlay_error_1", "test/test_utils.py::TestUtils::test_overlay_error_2", "test/test_utils.py::TestUtils::test_overlay_options", "test/test_utils.py::TestUtils::test_page", "test/test_utils.py::TestUtils::test_put_format_after_url_suffix", "test/test_utils.py::TestUtils::test_raw_transformation", "test/test_utils.py::TestUtils::test_resource_type", "test/test_utils.py::TestUtils::test_responsive_width", "test/test_utils.py::TestUtils::test_secure_akamai", "test/test_utils.py::TestUtils::test_secure_distibution", "test/test_utils.py::TestUtils::test_secure_distribution", "test/test_utils.py::TestUtils::test_secure_distribution_overwrite", "test/test_utils.py::TestUtils::test_secure_non_akamai", "test/test_utils.py::TestUtils::test_shorten", "test/test_utils.py::TestUtils::test_should_place_defined_variables_before_ordered", "test/test_utils.py::TestUtils::test_should_sort_defined_variable", 
"test/test_utils.py::TestUtils::test_should_support_auto_value", "test/test_utils.py::TestUtils::test_should_support_auto_width", "test/test_utils.py::TestUtils::test_should_support_string_interpolation", "test/test_utils.py::TestUtils::test_should_support_text_values", "test/test_utils.py::TestUtils::test_signed_url", "test/test_utils.py::TestUtils::test_size", "test/test_utils.py::TestUtils::test_start_offset", "test/test_utils.py::TestUtils::test_streaming_profile", "test/test_utils.py::TestUtils::test_support_a_percent_value", "test/test_utils.py::TestUtils::test_support_cdn_subdomain_with_secure_on_if_using_shared_domain", "test/test_utils.py::TestUtils::test_support_secure_cdn_subdomain_true_override_with_secure", "test/test_utils.py::TestUtils::test_support_url_suffix_for_private_cdn", "test/test_utils.py::TestUtils::test_support_url_suffix_for_raw_uploads", "test/test_utils.py::TestUtils::test_support_use_root_path_for_private_cdn", "test/test_utils.py::TestUtils::test_support_use_root_path_for_shared_cdn", "test/test_utils.py::TestUtils::test_support_use_root_path_together_with_url_suffix_for_private_cdn", "test/test_utils.py::TestUtils::test_transformation_array", "test/test_utils.py::TestUtils::test_transformation_simple", "test/test_utils.py::TestUtils::test_translate_if", "test/test_utils.py::TestUtils::test_type", "test/test_utils.py::TestUtils::test_underlay", "test/test_utils.py::TestUtils::test_user_agent", "test/test_utils.py::TestUtils::test_various_options", "test/test_utils.py::TestUtils::test_video_codec", "test/test_utils.py::TestUtils::test_video_sampling" ]
[]
MIT License
2,812
[ "cloudinary/uploader.py", "cloudinary/utils.py" ]
[ "cloudinary/uploader.py", "cloudinary/utils.py" ]
pypa__setuptools_scm-282
ae5533a4ee3be9f06270bcf9c4e152bedf59b832
2018-07-23 13:39:46
3ae1cad231545abfeedea9aaa7405e15fb28d95c
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a8d023c..bd68fab 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,8 @@ +v3.0.1 +======= + +* fix a regression in setuptools_scm.git.parse - reorder arguments so the positional invocation from before works as expected + v3.0.0 ======= diff --git a/src/setuptools_scm/git.py b/src/setuptools_scm/git.py index 07b976b..5ba9ecf 100644 --- a/src/setuptools_scm/git.py +++ b/src/setuptools_scm/git.py @@ -83,7 +83,9 @@ def fail_on_shallow(wd): ) -def parse(root, config=None, describe_command=DEFAULT_DESCRIBE, pre_parse=warn_on_shallow): +def parse( + root, describe_command=DEFAULT_DESCRIBE, pre_parse=warn_on_shallow, config=None +): """ :param pre_parse: experimental pre_parse action, may change at any time """ @@ -120,7 +122,14 @@ def parse(root, config=None, describe_command=DEFAULT_DESCRIBE, pre_parse=warn_o branch = wd.get_branch() if number: - return meta(tag, config=config, distance=number, node=node, dirty=dirty, branch=branch) + return meta( + tag, + config=config, + distance=number, + node=node, + dirty=dirty, + branch=branch, + ) else: return meta(tag, config=config, node=node, dirty=dirty, branch=branch)
AttributeError: 'str' object has no attribute 'absolute_root' It seems that our Appveyor builds have started failing with the newly-released 3.0.0: ``` (arrow) C:\projects\arrow\python>python setup.py build_ext install -q --single-version-externally-managed --record=record.text bdist_wheel -q || exit /B Traceback (most recent call last): File "setup.py", line 545, in <module> url="https://arrow.apache.org/" File "C:\Miniconda36-x64\envs\arrow\lib\site-packages\setuptools\__init__.py", line 131, in setup return distutils.core.setup(**attrs) File "C:\Miniconda36-x64\envs\arrow\lib\distutils\core.py", line 108, in setup _setup_distribution = dist = klass(attrs) File "C:\Miniconda36-x64\envs\arrow\lib\site-packages\setuptools\dist.py", line 370, in __init__ k: v for k, v in attrs.items() File "C:\Miniconda36-x64\envs\arrow\lib\distutils\dist.py", line 281, in __init__ self.finalize_options() File "C:\Miniconda36-x64\envs\arrow\lib\site-packages\setuptools\dist.py", line 529, in finalize_options ep.load()(self, ep.name, value) File "c:\projects\arrow\python\.eggs\setuptools_scm-3.0.0-py3.6.egg\setuptools_scm\integration.py", line 23, in version_keyword dist.metadata.version = get_version(**value) File "c:\projects\arrow\python\.eggs\setuptools_scm-3.0.0-py3.6.egg\setuptools_scm\__init__.py", line 128, in get_version parsed_version = _do_parse(config) File "c:\projects\arrow\python\.eggs\setuptools_scm-3.0.0-py3.6.egg\setuptools_scm\__init__.py", line 73, in _do_parse parse_result = _call_entrypoint_fn(config, config.parse) File "c:\projects\arrow\python\.eggs\setuptools_scm-3.0.0-py3.6.egg\setuptools_scm\__init__.py", line 36, in _call_entrypoint_fn return fn(config.absolute_root) File "setup.py", line 496, in parse_version version = setuptools_scm.git.parse(root, describe) File "c:\projects\arrow\python\.eggs\setuptools_scm-3.0.0-py3.6.egg\setuptools_scm\git.py", line 96, in parse wd = GitWorkdir.from_potential_worktree(config.absolute_root) AttributeError: 'str' 
object has no attribute 'absolute_root' ```
pypa/setuptools_scm
diff --git a/testing/test_git.py b/testing/test_git.py index dd5f817..d854a7c 100644 --- a/testing/test_git.py +++ b/testing/test_git.py @@ -28,6 +28,11 @@ def test_parse_describe_output(given, tag, number, node, dirty): assert parsed == (tag, number, node, dirty) [email protected]("https://github.com/pypa/setuptools_scm/issues/281") +def test_parse_call_order(wd): + git.parse(str(wd.cwd), git.DEFAULT_DESCRIBE) + + def test_version_from_git(wd): assert wd.version == "0.1.dev0"
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
3.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 -e git+https://github.com/pypa/setuptools_scm.git@ae5533a4ee3be9f06270bcf9c4e152bedf59b832#egg=setuptools_scm tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: setuptools_scm channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/setuptools_scm
[ "testing/test_git.py::test_parse_call_order" ]
[]
[ "testing/test_git.py::test_parse_describe_output[3.3.1-rc26-0-g9df187b-3.3.1-rc26-0-g9df187b-False]", "testing/test_git.py::test_parse_describe_output[17.33.0-rc-17-g38c3047c0-17.33.0-rc-17-g38c3047c0-False]", "testing/test_git.py::test_version_from_git", "testing/test_git.py::test_unicode_version_scheme", "testing/test_git.py::test_git_worktree", "testing/test_git.py::test_git_dirty_notag", "testing/test_git.py::test_git_parse_shallow_warns", "testing/test_git.py::test_git_parse_shallow_fail", "testing/test_git.py::test_git_shallow_autocorrect", "testing/test_git.py::test_find_files_stop_at_root_git", "testing/test_git.py::test_parse_no_worktree", "testing/test_git.py::test_alphanumeric_tags_match", "testing/test_git.py::test_git_archive_export_ignore", "testing/test_git.py::test_git_archive_subdirectory", "testing/test_git.py::test_git_archive_run_from_subdirectory", "testing/test_git.py::test_git_feature_branch_increments_major" ]
[]
MIT License
2,813
[ "src/setuptools_scm/git.py", "CHANGELOG.rst" ]
[ "src/setuptools_scm/git.py", "CHANGELOG.rst" ]
pypa__setuptools_scm-285
3214d3e665b296a85f12fc1c20430b311d7f2975
2018-07-23 16:55:22
3ae1cad231545abfeedea9aaa7405e15fb28d95c
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bd68fab..ea83b4e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,7 +1,13 @@ +v3.0.2 +====== + +* fix a regression from tag parsing - support for multi-dashed prefixes - #284 + + v3.0.1 ======= -* fix a regression in setuptools_scm.git.parse - reorder arguments so the positional invocation from before works as expected +* fix a regression in setuptools_scm.git.parse - reorder arguments so the positional invocation from before works as expected #281 v3.0.0 ======= diff --git a/src/setuptools_scm/__init__.py b/src/setuptools_scm/__init__.py index 58be234..49c0c4b 100644 --- a/src/setuptools_scm/__init__.py +++ b/src/setuptools_scm/__init__.py @@ -29,10 +29,14 @@ def version_from_scm(root): def _call_entrypoint_fn(config, fn): - if function_has_arg(fn, 'config'): + if function_has_arg(fn, "config"): return fn(config.absolute_root, config=config) else: - warnings.warn("parse functions are required to provide a named argument 'config' in the future.", PendingDeprecationWarning) + warnings.warn( + "parse functions are required to provide a named argument" + " 'config' in the future.", + PendingDeprecationWarning, + ) return fn(config.absolute_root) @@ -75,13 +79,16 @@ def _do_parse(config): raise TypeError( "version parse result was a string\nplease return a parsed version" ) - version = parse_result or \ - _version_from_entrypoint(config, "setuptools_scm.parse_scm_fallback") - + version = parse_result or _version_from_entrypoint( + config, "setuptools_scm.parse_scm_fallback" + ) else: # include fallbacks after dropping them from the main entrypoint - version = _version_from_entrypoint(config, "setuptools_scm.parse_scm") or \ - _version_from_entrypoint(config, "setuptools_scm.parse_scm_fallback") + version = _version_from_entrypoint( + config, "setuptools_scm.parse_scm" + ) or _version_from_entrypoint( + config, "setuptools_scm.parse_scm_fallback" + ) if version: return version @@ -114,7 +121,7 @@ def get_version( in 
the root of the repository to direct setuptools_scm to the root of the repository by supplying ``__file__``. """ - + config = Configuration() config.root = root config.version_scheme = version_scheme diff --git a/src/setuptools_scm/config.py b/src/setuptools_scm/config.py index dbd3768..b1c07f8 100644 --- a/src/setuptools_scm/config.py +++ b/src/setuptools_scm/config.py @@ -6,8 +6,8 @@ import warnings from .utils import trace -DEFAULT_TAG_REGEX = r'^(?:\w+-)?(?P<version>v?\d+(?:\.\d+){0,2}[^\+]+)(?:\+.*)?$' -DEFAULT_VERSION_SCHEME = 'version_scheme' +DEFAULT_TAG_REGEX = r"^(?:[\w-]+-)?(?P<version>v?\d+(?:\.\d+){0,2}[^\+]+)(?:\+.*)?$" +DEFAULT_VERSION_SCHEME = "version_scheme" def _check_tag_regex(value): @@ -16,9 +16,11 @@ def _check_tag_regex(value): regex = re.compile(value) group_names = regex.groupindex.keys() - if regex.groups == 0 or (regex.groups > 1 and 'version' not in group_names): - warnings.warn("Expected tag_regex to contain a single match group or a group named 'version' " + - "to identify the version part of any tag.") + if regex.groups == 0 or (regex.groups > 1 and "version" not in group_names): + warnings.warn( + "Expected tag_regex to contain a single match group or a group named" + " 'version' to identify the version part of any tag." + ) return regex @@ -26,7 +28,10 @@ def _check_tag_regex(value): def _check_absolute_root(root, relative_to): if relative_to: if os.path.isabs(root) and not root.startswith(relative_to): - warnings.warn("absolute root path '%s' overrides relative_to '%s'" % (root, relative_to)) + warnings.warn( + "absolute root path '%s' overrides relative_to '%s'" + % (root, relative_to) + ) root = os.path.join(os.path.dirname(relative_to), root) return os.path.abspath(root) @@ -44,17 +49,15 @@ class Configuration(object): _tag_regex = None _absolute_root = None - def __init__(self, - relative_to=None, - root='.'): + def __init__(self, relative_to=None, root="."): # TODO: self._relative_to = relative_to - self._root = '.' 
+ self._root = "." self.root = root self.version_scheme = DEFAULT_VERSION_SCHEME self.local_scheme = "node-and-date" - self.write_to = '' + self.write_to = "" self.write_to_template = None self.parse = None self.tag_regex = DEFAULT_TAG_REGEX
How to use `write_to_template` parameter? Would someone please show an example or two of how to use the `write_to_template` parameter? I've tried using some of the patterns listed in https://github.com/pypa/setuptools_scm#default-versioning-scheme but they exit with KeyError ~~~ scm_version_options = { 'write_to_template': '{tag}+dYYYMMMDD', 'write_to' : 'version.py' } setup( name = 'leo', use_scm_version = scm_version_options, setup_requires=['setuptools_scm'], ...snip... ) ~~~ ~~~ >python setup.py check Traceback (most recent call last): File "setup.py", line 81, in <module> 'gui_scripts' : ['leo = leo.core.runLeo:run'] File "C:\apps\Miniconda3\lib\distutils\core.py", line 108, in setup _setup_distribution = dist = klass(attrs) File "C:\apps\Miniconda3\lib\site-packages\setuptools\dist.py", line 338, in __init__ _Distribution.__init__(self, attrs) File "C:\apps\Miniconda3\lib\distutils\dist.py", line 281, in __init__ self.finalize_options() File "C:\apps\Miniconda3\lib\site-packages\setuptools\dist.py", line 471, in finalize_options ep.load()(self, ep.name, value) File "C:\apps\Miniconda3\lib\site-packages\setuptools_scm\integration.py", line 22, in version_keyword dist.metadata.version = get_version(**value) File "C:\apps\Miniconda3\lib\site-packages\setuptools_scm\__init__.py", line 130, in get_version template=write_to_template) File "C:\apps\Miniconda3\lib\site-packages\setuptools_scm\__init__.py", line 50, in dump_version dump = template.format(version=version) KeyError: 'tag' ~~~ AssertionError: cant parse version apache-arrow-0.9.0 I'm getting the following error with 3.0.1: ``` /home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/distutils/dist.py:267: UserWarning: Unknown distribution option: 'long_description_content_type' warnings.warn(msg) /home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools_scm/version.py:92: UserWarning: tag 'apache-arrow-0.9.0' no version found warnings.warn("tag %r no version found" % (tag,)) Traceback 
(most recent call last): File "setup.py", line 545, in <module> url="https://arrow.apache.org/" File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools/__init__.py", line 129, in setup return distutils.core.setup(**attrs) File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/distutils/core.py", line 111, in setup _setup_distribution = dist = klass(attrs) File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools/dist.py", line 372, in __init__ _Distribution.__init__(self, attrs) File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/distutils/dist.py", line 287, in __init__ self.finalize_options() File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools/dist.py", line 528, in finalize_options ep.load()(self, ep.name, value) File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools_scm/integration.py", line 23, in version_keyword dist.metadata.version = get_version(**value) File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools_scm/__init__.py", line 128, in get_version parsed_version = _do_parse(config) File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools_scm/__init__.py", line 73, in _do_parse parse_result = _call_entrypoint_fn(config, config.parse) File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools_scm/__init__.py", line 36, in _call_entrypoint_fn return fn(config.absolute_root) File "setup.py", line 496, in parse_version version = setuptools_scm.git.parse(root, describe) File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools_scm/git.py", line 131, in parse branch=branch, File "/home/antoine/miniconda3/envs/pyarrow27/lib/python2.7/site-packages/setuptools_scm/version.py", line 184, in meta assert parsed_version is not None, "cant parse version %s" % tag AssertionError: cant parse version apache-arrow-0.9.0 ```
pypa/setuptools_scm
diff --git a/testing/test_config.py b/testing/test_config.py new file mode 100644 index 0000000..89181b8 --- /dev/null +++ b/testing/test_config.py @@ -0,0 +1,18 @@ +from setuptools_scm.config import Configuration + +import pytest + + [email protected]( + "tag, expected_version", + [ + ("apache-arrow-0.9.0", "0.9.0"), + ("arrow-0.9.0", "0.9.0"), + ("arrow-0.9.0-rc", "0.9.0-rc"), + ], +) +def test_tag_regex(tag, expected_version): + config = Configuration() + match = config.tag_regex.match(tag) + version = match.group("version") + assert version == expected_version
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 3 }
3.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 -e git+https://github.com/pypa/setuptools_scm.git@3214d3e665b296a85f12fc1c20430b311d7f2975#egg=setuptools_scm toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: setuptools_scm channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/setuptools_scm
[ "testing/test_config.py::test_tag_regex[apache-arrow-0.9.0-0.9.0]" ]
[]
[ "testing/test_config.py::test_tag_regex[arrow-0.9.0-0.9.0]", "testing/test_config.py::test_tag_regex[arrow-0.9.0-rc-0.9.0-rc]" ]
[]
MIT License
2,814
[ "CHANGELOG.rst", "src/setuptools_scm/__init__.py", "src/setuptools_scm/config.py" ]
[ "CHANGELOG.rst", "src/setuptools_scm/__init__.py", "src/setuptools_scm/config.py" ]
oasis-open__cti-stix-validator-69
e47ffb22958eb81d60b293f3efcc2fab33827f14
2018-07-23 18:54:55
e47ffb22958eb81d60b293f3efcc2fab33827f14
diff --git a/stix2validator/__init__.py b/stix2validator/__init__.py index 83efa8d..7cc2db5 100644 --- a/stix2validator/__init__.py +++ b/stix2validator/__init__.py @@ -1,7 +1,6 @@ # Expose certain functions and classes to the stix2validator namespace # flake8: noqa -import logging import sys from .errors import ValidationError @@ -11,5 +10,3 @@ from .validator import (run_validation, validate, validate_file, validate_instance, validate_parsed_json, validate_string) from .version import __version__ - -logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s') diff --git a/stix2validator/output.py b/stix2validator/output.py index b2f3030..f011807 100644 --- a/stix2validator/output.py +++ b/stix2validator/output.py @@ -1,5 +1,4 @@ import logging -import operator import sys from colorama import Fore, Style, init @@ -36,7 +35,7 @@ def set_silent(silence_output=False): def error(msg, status=codes.EXIT_FAILURE): - """Prints a message to the stderr prepended by '[X]' and calls + """Print a message to the stderr prepended by '[X]' and calls ```sys.exit(status)``. Args: @@ -49,7 +48,7 @@ def error(msg, status=codes.EXIT_FAILURE): def info(msg): - """Prints a message to stdout, prepended by '[-]'. + """Print a message to stdout, prepended by '[-]'. Note: If the application is not running in verbose mode, this function will @@ -66,7 +65,7 @@ def info(msg): def print_level(log_function, fmt, level, *args): - """Prints a formatted message to stdout prepended by spaces. Useful for + """Print a formatted message to stdout prepended by spaces. Useful for printing hierarchical information, like bullet lists. Note: @@ -101,13 +100,13 @@ def print_level(log_function, fmt, level, *args): def print_fatal_results(results, level=0): - """Prints fatal errors that occurred during validation runs. + """Print fatal errors that occurred during validation runs. 
""" print_level(logger.critical, _RED + "[X] Fatal Error: %s", level, results.error) def print_schema_results(results, level=0): - """Prints JSON Schema validation errors to stdout. + """Print JSON Schema validation errors to stdout. Args: results: An instance of ObjectValidationResults. @@ -119,7 +118,7 @@ def print_schema_results(results, level=0): def print_warning_results(results, level=0): - """Prints warning messages found during validation. + """Print warning messages found during validation. """ marker = _YELLOW + "[!] " @@ -128,7 +127,7 @@ def print_warning_results(results, level=0): def print_horizontal_rule(): - """Prints a horizontal rule. + """Print a horizontal rule. Note: If the application is running in "Silent Mode" @@ -142,32 +141,72 @@ def print_horizontal_rule(): logger.info("=" * 80) +def print_results_header(identifier, is_valid): + """Print a header for the results of either a file or an object. + + """ + print_horizontal_rule() + print_level(logger.info, "[-] Results for: %s", 0, identifier) + + if is_valid: + marker = _GREEN + "[+]" + verdict = "Valid" + log_func = logger.info + else: + marker = _RED + "[X]" + verdict = "Invalid" + log_func = logger.error + print_level(log_func, "%s STIX JSON: %s", 0, marker, verdict) + + +def print_object_results(obj_result): + """Print the results of validating an object. + + Args: + obj_result: An ObjectValidationResults instance. + + """ + print_results_header(obj_result.object_id, obj_result.is_valid) + + if obj_result.warnings: + print_warning_results(obj_result, 1) + if obj_result.errors: + print_schema_results(obj_result, 1) + + +def print_file_results(file_result): + """Print the results of validating a file. + + Args: + file_result: A FileValidationResults instance. 
+ + """ + print_results_header(file_result.filepath, file_result.is_valid) + + for object_result in file_result.object_results: + if object_result.warnings: + print_warning_results(object_result, 1) + if object_result.errors: + print_schema_results(object_result, 1) + + if file_result.fatal: + print_fatal_results(file_result.fatal, 1) + + def print_results(results): - """Prints `results` (the results of validation) to stdout. + """Print `results` (the results of validation) to stdout. Args: - results: A list of FileValidationResults instances. + results: A list of FileValidationResults or ObjectValidationResults + instances. """ - for file_result in sorted(results, key=operator.attrgetter("filepath")): - print_horizontal_rule() - print_level(logger.info, "[-] Results for: %s", 0, file_result.filepath) - - if file_result.is_valid: - marker = _GREEN + "[+]" - verdict = "Valid" - log_func = logger.info - else: - marker = _RED + "[X]" - verdict = "Invalid" - log_func = logger.error - print_level(log_func, "%s STIX JSON: %s", 0, marker, verdict) - - for object_result in file_result.object_results: - if object_result.warnings: - print_warning_results(object_result, 1) - if object_result.errors: - print_schema_results(object_result, 1) - - if file_result.fatal: - print_fatal_results(file_result.fatal, 1) + if not isinstance(results, list): + results = [results] + + for r in results: + try: + r.log() + except AttributeError: + raise ValueError('Argument to print_results() must be a list of ' + 'FileValidationResults or ObjectValidationResults.') diff --git a/stix2validator/schemas b/stix2validator/schemas index 7373376..8f2448b 160000 --- a/stix2validator/schemas +++ b/stix2validator/schemas @@ -1,1 +1,1 @@ -Subproject commit 73733762141d642a92a28fcab6ad4734866d5309 +Subproject commit 8f2448bd97e458dba103bbcf5249837781e3b716 diff --git a/stix2validator/scripts/stix2_validator.py b/stix2validator/scripts/stix2_validator.py index 63d6927..7148a47 100644 --- 
a/stix2validator/scripts/stix2_validator.py +++ b/stix2validator/scripts/stix2_validator.py @@ -10,6 +10,7 @@ import sys from stix2validator import (ValidationError, codes, output, parse_args, print_results, run_validation) +logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s') logger = logging.getLogger(__name__) diff --git a/stix2validator/validator.py b/stix2validator/validator.py index ba5df60..2e73c6e 100644 --- a/stix2validator/validator.py +++ b/stix2validator/validator.py @@ -175,6 +175,11 @@ class FileValidationResults(BaseResults): else: self._object_results = [object_results] + def log(self): + """Print (log) these file validation results. + """ + output.print_file_results(self) + class ObjectValidationResults(BaseResults): """Results of JSON schema validation for a single STIX object. @@ -191,10 +196,12 @@ class ObjectValidationResults(BaseResults): Attributes: is_valid: ``True`` if the validation was successful and ``False`` otherwise. + object_id: ID of the STIX object. """ - def __init__(self, is_valid=False, errors=None, warnings=None): + def __init__(self, is_valid=False, object_id=None, errors=None, warnings=None): super(ObjectValidationResults, self).__init__(is_valid) + self.object_id = object_id self.errors = errors self.warnings = warnings @@ -232,6 +239,11 @@ class ObjectValidationResults(BaseResults): return d + def log(self): + """Print (log) these file validation results. + """ + output.print_object_results(self) + class ValidationErrorResults(BaseResults): """Results of a failed validation due to a raised Exception. @@ -370,23 +382,23 @@ def validate_parsed_json(obj_json, options=None): init_requests_cache(options.refresh_cache) results = None - try: - if validating_list: - # Doing it this way instead of using a comprehension means that - # initial validation results will be retained, even if a later - # exception aborts the sequence. 
- results = [] - for obj in obj_json: + if validating_list: + results = [] + for obj in obj_json: + try: results.append(validate_instance(obj, options)) - else: + except SchemaInvalidError as ex: + error_result = ObjectValidationResults(is_valid=False, + object_id=obj.get('id', ''), + errors=[str(ex)]) + results.append(error_result) + else: + try: results = validate_instance(obj_json, options) - - except SchemaInvalidError as ex: - error_result = ObjectValidationResults(is_valid=False, - errors=[str(ex)]) - if validating_list: - results.append(error_result) - else: + except SchemaInvalidError as ex: + error_result = ObjectValidationResults(is_valid=False, + object_id=obj_json.get('id', ''), + errors=[str(ex)]) results = error_result if not options.no_cache and options.clear_cache: @@ -447,8 +459,9 @@ def validate_file(fn, options=None): "validation will be performed: {error}") output.info(msg.format(fn=fn, error=str(ex))) - file_results.is_valid = all(object_result.is_valid - for object_result in file_results.object_results) + file_results.is_valid = (all(object_result.is_valid + for object_result in file_results.object_results) + and not file_results.fatal) return file_results @@ -708,5 +721,5 @@ def validate_instance(instance, options=None): else: valid = True - return ObjectValidationResults(is_valid=valid, errors=error_list, - warnings=warnings) + return ObjectValidationResults(is_valid=valid, object_id=instance.get('id', ''), + errors=error_list, warnings=warnings)
The validator logger has introduced problems for the elevator logger Once the validator 1.0.1 was used with the elevator, each elevator message was output twice. This temporary fix (https://github.com/oasis-open/cti-stix-elevator/commit/b7ce45baf17c0652afaf536415a3781021000f49) in the elevator solved the problem, but we should look into how loggers work together when stix utilities call each other. print_results: 'ObjectValidationResults' object is not iterable ```python3 from stix2validator import validate_string, print_results results = validate_string(sample.stix2) print_results(results) ``` Produces: File "lib/python3.6/site-packages/stix2validator/output.py", line 152, in print_results for file_result in sorted(results, key=operator.attrgetter("filepath")): TypeError: 'ObjectValidationResults' object is not iterable Using: stix2-validator==1.0.1 Python 3.6.3
oasis-open/cti-stix-validator
diff --git a/stix2validator/test/indicator_tests.py b/stix2validator/test/indicator_tests.py index df31bae..9f33896 100644 --- a/stix2validator/test/indicator_tests.py +++ b/stix2validator/test/indicator_tests.py @@ -2,7 +2,7 @@ import copy import json from . import ValidatorTest -from .. import validate_parsed_json, validate_string +from .. import ValidationOptions, validate_parsed_json, validate_string VALID_INDICATOR = u""" { @@ -19,6 +19,13 @@ VALID_INDICATOR = u""" } """ +ADDTNL_INVALID_SCHEMA = { + "type": "x-foo-bar", + "id": "x-type--353ed279-5f4f-4a79-bffc-b2e2ed08ea1f", + "created": "2016-04-06T20:03:48.000Z", + "modified": "2016-04-06T20:03:48.000Z", +} + class IndicatorTestCases(ValidatorTest): valid_indicator = json.loads(VALID_INDICATOR) @@ -152,7 +159,7 @@ class IndicatorTestCases(ValidatorTest): def test_pattern_custom_object_prefix_lax(self): indicator = copy.deepcopy(self.valid_indicator) - indicator['pattern'] = """[x-foo":x_name = 'something']""" + indicator['pattern'] = """[x-foo:x_name = 'something']""" self.check_ignore(indicator, 'custom-prefix') def test_pattern_custom_property_prefix_strict(self): @@ -202,10 +209,14 @@ class IndicatorTestCases(ValidatorTest): self.assertTrueWithOptions(new_obj, schema_dir=self.custom_schemas) def test_additional_schemas_custom_type_invalid_schema(self): - new_obj = { - "type": "x-foo-bar", - "id": "x-type--353ed279-5f4f-4a79-bffc-b2e2ed08ea1f", - "created": "2016-04-06T20:03:48.000Z", - "modified": "2016-04-06T20:03:48.000Z", - } - self.assertFalseWithOptions(new_obj, schema_dir=self.custom_schemas) + self.assertFalseWithOptions(ADDTNL_INVALID_SCHEMA, schema_dir=self.custom_schemas) + + def test_validate_parsed_json_list_additional_invalid_schema(self): + indicator = copy.deepcopy(self.valid_indicator) + indicator['name'] = 'Foobar' + objects = [indicator, ADDTNL_INVALID_SCHEMA] + + options = ValidationOptions(schema_dir=self.custom_schemas) + results = validate_parsed_json(objects, options) + assert 
results[0].is_valid + assert not results[1].is_valid diff --git a/stix2validator/test/misc_tests.py b/stix2validator/test/misc_tests.py index 9209652..d97582e 100644 --- a/stix2validator/test/misc_tests.py +++ b/stix2validator/test/misc_tests.py @@ -1,27 +1,128 @@ +from io import open +import logging import os +import re import sys import pytest -from . import ValidatorTest -from .. import ValidationOptions, run_validation +from .. import (ValidationOptions, print_results, run_validation, + validate_file, validate_string) from .tool_tests import VALID_TOOL +logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s') +logger = logging.getLogger(__name__) -class MiscTestCases(ValidatorTest): +EXAMPLE = os.path.join(os.path.dirname(os.path.realpath(__file__)), + '..', 'schemas', 'examples', + 'indicator-to-campaign-relationship.json') +IDENTITY = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'test_examples', 'identity.json') +IDENTITY_CUSTOM = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'test_examples', 'identity_custom.json') +INVALID_BRACES = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'test_examples', 'invalid_braces.json') +INVALID_COMMA = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'test_examples', 'invalid_comma.json') +INVALID_IDENTITY = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'test_examples', 'invalid_identity.json') +INVALID_TIMESTAMP = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'test_examples', 'invalid_timestamp.json') - def test_run_validation(self): - inputfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), - '..', 'schemas', 'examples', - 'using-granular-markings.json') - options = ValidationOptions(files=[inputfile]) - results = run_validation(options) - assert results[0].is_valid - def test_run_validation_nonexistent_file(self): - options = ValidationOptions(files='asdf.json') - with pytest.raises(SystemExit): - 
run_validation(options) +def test_run_validation(caplog): + options = ValidationOptions(files=[EXAMPLE]) + results = run_validation(options) + assert results[0].is_valid + + print_results(results) + assert 'STIX JSON: Valid' in caplog.text + + +def test_run_validation_nonexistent_file(): + options = ValidationOptions(files='asdf.json') + with pytest.raises(SystemExit): + run_validation(options) + + +def test_run_validation_silent(caplog): + options = ValidationOptions(files=[EXAMPLE], silent=True) + results = run_validation(options) + print_results(results) + assert caplog.text == '' + + +def test_validate_file(caplog): + results = validate_file(EXAMPLE) + assert results.is_valid + + print_results(results) + assert 'STIX JSON: Valid' in caplog.text + + +def test_validate_file_warning(caplog): + results = validate_file(IDENTITY_CUSTOM) + assert results.is_valid + + print_results(results) + assert re.search("Custom property .+ should have a type that starts with 'x_'", caplog.text) + + +def test_validate_file_invalid_brace(caplog): + results = validate_file(INVALID_BRACES) + assert not results.is_valid + + print_results(results) + assert 'Fatal Error: Invalid JSON input' in caplog.text + + +def test_validate_file_invalid_comma(caplog): + results = validate_file(INVALID_COMMA) + assert not results.is_valid + + print_results(results) + assert 'Fatal Error: Expecting property name' in caplog.text + + +def test_validate_file_invalid_missing_modified(caplog): + results = validate_file(INVALID_IDENTITY) + assert not results.is_valid + + print_results(results) + assert "'modified' is a required property" in caplog.text + + +def test_validate_string(caplog): + with open(IDENTITY, encoding='utf-8') as f: + results = validate_string(f.read()) + assert results.is_valid + + print_results(results) + assert 'STIX JSON: Valid' in caplog.text + + +def test_validate_string_warning(caplog): + with open(IDENTITY_CUSTOM, encoding='utf-8') as f: + results = validate_string(f.read()) + 
assert results.is_valid + + print_results(results) + assert re.search("Custom property .+ should have a type that starts with 'x_'", caplog.text) + + +def test_validate_string_invalid_timestamp(caplog): + with open(INVALID_TIMESTAMP, encoding='utf-8') as f: + results = validate_string(f.read()) + assert not results.is_valid + + print_results(results) + assert re.search("'modified' .+ must be later or equal to 'created'", caplog.text) + + +def test_print_results_invalid_parameter(): + with pytest.raises(ValueError) as excinfo: + print_results('these results are valid') + assert 'Argument to print_results() must be' in str(excinfo) def test_run_validation_stdin(monkeypatch): diff --git a/stix2validator/test/test_examples/identity.json b/stix2validator/test/test_examples/identity.json new file mode 100644 index 0000000..91a76c5 --- /dev/null +++ b/stix2validator/test/test_examples/identity.json @@ -0,0 +1,8 @@ +{ + "type": "identity", + "id": "identity--8c6af861-7b20-41ef-9b59-6344fd872a8f", + "created": "2016-08-08T15:50:10.983Z", + "modified": "2016-08-08T15:50:10.983Z", + "name": "Franistan Intelligence", + "identity_class": "organization" +} diff --git a/stix2validator/test/test_examples/identity_custom.json b/stix2validator/test/test_examples/identity_custom.json new file mode 100644 index 0000000..0101f7e --- /dev/null +++ b/stix2validator/test/test_examples/identity_custom.json @@ -0,0 +1,16 @@ +{ + "type": "bundle", + "id": "bundle--d6a999f2-849c-4d1b-aba4-4445c4444444", + "spec_version": "2.0", + "objects": [ + { + "type": "identity", + "id": "identity--8c6af861-7b20-41ef-9b59-6344fd872a8f", + "created": "2016-08-08T15:50:10.983Z", + "modified": "2016-08-08T15:50:10.983Z", + "name": "Franistan Intelligence", + "identity_class": "organization", + "foo": "bar" + } + ] +} diff --git a/stix2validator/test/test_examples/identity_warning.json b/stix2validator/test/test_examples/identity_warning.json new file mode 100644 index 0000000..91a76c5 --- /dev/null +++ 
b/stix2validator/test/test_examples/identity_warning.json @@ -0,0 +1,8 @@ +{ + "type": "identity", + "id": "identity--8c6af861-7b20-41ef-9b59-6344fd872a8f", + "created": "2016-08-08T15:50:10.983Z", + "modified": "2016-08-08T15:50:10.983Z", + "name": "Franistan Intelligence", + "identity_class": "organization" +} diff --git a/stix2validator/test/test_examples/invalid_braces.json b/stix2validator/test/test_examples/invalid_braces.json new file mode 100644 index 0000000..36e92c4 --- /dev/null +++ b/stix2validator/test/test_examples/invalid_braces.json @@ -0,0 +1,8 @@ +<{ + "type": "identity", + "id": "identity--8c6af861-7b20-41ef-9b59-6344fd872a8f", + "created": "2016-08-08T15:50:10.983Z", + "modified": "2016-08-08T15:50:10.983Z", + "name": "Franistan Intelligence", + "identity_class": "organization" +}> diff --git a/stix2validator/test/test_examples/invalid_comma.json b/stix2validator/test/test_examples/invalid_comma.json new file mode 100644 index 0000000..6c0aaeb --- /dev/null +++ b/stix2validator/test/test_examples/invalid_comma.json @@ -0,0 +1,8 @@ +{ + "type": "identity", + "id": "identity--8c6af861-7b20-41ef-9b59-6344fd872a8f", + "created": "2016-08-08T15:50:10.983Z", + "modified": "2016-08-08T15:50:10.983Z", + "name": "Franistan Intelligence", + "identity_class": "organization", +} diff --git a/stix2validator/test/test_examples/invalid_identity.json b/stix2validator/test/test_examples/invalid_identity.json new file mode 100644 index 0000000..11d11ac --- /dev/null +++ b/stix2validator/test/test_examples/invalid_identity.json @@ -0,0 +1,7 @@ +{ + "type": "identity", + "id": "identity--8c6af861-7b20-41ef-9b59-6344fd872a8f", + "created": "2016-08-08T15:50:10.983Z", + "name": "Franistan Intelligence", + "identity_class": "organization" +} diff --git a/stix2validator/test/test_examples/invalid_timestamp.json b/stix2validator/test/test_examples/invalid_timestamp.json new file mode 100644 index 0000000..b4d379f --- /dev/null +++ 
b/stix2validator/test/test_examples/invalid_timestamp.json @@ -0,0 +1,8 @@ +{ + "type": "identity", + "id": "identity--8c6af861-7b20-41ef-9b59-6344fd872a8f", + "created": "2016-08-08T15:50:10.983Z", + "modified": "2015-08-08T15:50:10.983Z", + "name": "Franistan Intelligence", + "identity_class": "organization" +}
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 5 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 antlr4-python3-runtime==4.9.3 appdirs==1.4.4 attrs==21.4.0 Babel==2.11.0 bump2version==1.0.1 bumpversion==0.6.0 certifi==2021.5.30 cfgv==3.3.1 charset-normalizer==2.0.12 colorama==0.4.5 coverage==6.2 distlib==0.3.9 docutils==0.18.1 filelock==3.4.1 identify==2.4.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.2.3 iniconfig==1.1.1 itsdangerous==2.0.1 Jinja2==3.0.3 jsonschema==2.5.1 MarkupSafe==2.0.1 nodeenv==1.6.0 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 pre-commit==2.17.0 py==1.11.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 requests==2.27.1 requests-cache==0.7.5 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-prompt==1.5.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 stix2-patterns==2.0.0 -e git+https://github.com/oasis-open/cti-stix-validator.git@e47ffb22958eb81d60b293f3efcc2fab33827f14#egg=stix2_validator toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 url-normalize==1.4.3 urllib3==1.26.20 virtualenv==20.16.2 zipp==3.6.0
name: cti-stix-validator channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - antlr4-python3-runtime==4.9.3 - appdirs==1.4.4 - attrs==21.4.0 - babel==2.11.0 - bump2version==1.0.1 - bumpversion==0.6.0 - cfgv==3.3.1 - charset-normalizer==2.0.12 - colorama==0.4.5 - coverage==6.2 - distlib==0.3.9 - docutils==0.18.1 - filelock==3.4.1 - identify==2.4.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.2.3 - iniconfig==1.1.1 - itsdangerous==2.0.1 - jinja2==3.0.3 - jsonschema==2.5.1 - markupsafe==2.0.1 - nodeenv==1.6.0 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - pre-commit==2.17.0 - py==1.11.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - requests==2.27.1 - requests-cache==0.7.5 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-prompt==1.5.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - stix2-patterns==2.0.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - url-normalize==1.4.3 - urllib3==1.26.20 - virtualenv==20.16.2 - zipp==3.6.0 prefix: /opt/conda/envs/cti-stix-validator
[ "stix2validator/test/misc_tests.py::test_validate_file_invalid_brace", "stix2validator/test/misc_tests.py::test_validate_file_invalid_comma", "stix2validator/test/misc_tests.py::test_print_results_invalid_parameter" ]
[ "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_additional_schemas", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_additional_schemas_custom_type", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_pattern_custom_object_noprefix", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_pattern_custom_object_prefix_lax", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_pattern_custom_object_prefix_strict", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_pattern_custom_property_prefix_strict", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_pattern_list_object_property", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_pattern_with_escaped_slashes", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_validate_parsed_json_list_additional_invalid_schema", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_vocab_indicator_label", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_wellformed_indicator", "stix2validator/test/misc_tests.py::test_run_validation", "stix2validator/test/misc_tests.py::test_run_validation_silent", "stix2validator/test/misc_tests.py::test_validate_file", "stix2validator/test/misc_tests.py::test_validate_file_warning", "stix2validator/test/misc_tests.py::test_validate_file_invalid_missing_modified", "stix2validator/test/misc_tests.py::test_validate_string", "stix2validator/test/misc_tests.py::test_validate_string_warning", "stix2validator/test/misc_tests.py::test_validate_string_invalid_timestamp", "stix2validator/test/misc_tests.py::test_run_validation_stdin" ]
[ "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_additional_schemas_custom_type_invalid_schema", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_custom_property_name_invalid_character", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_custom_property_name_long", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_custom_property_name_short", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_custom_property_name_strict", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_empty_list", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_id_type", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_invalid_pattern", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_modified_before_created", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_pattern_custom_invalid_format", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_reserved_object_type_incident", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_reserved_object_type_infrastructure", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_reserved_property_action", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_reserved_property_addresses", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_reserved_property_confidence", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_reserved_property_phone_numbers", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_reserved_property_severity", "stix2validator/test/indicator_tests.py::IndicatorTestCases::test_reserved_property_usernames", "stix2validator/test/misc_tests.py::test_run_validation_nonexistent_file" ]
[]
BSD 3-Clause "New" or "Revised" License
2,815
[ "stix2validator/schemas", "stix2validator/output.py", "stix2validator/__init__.py", "stix2validator/scripts/stix2_validator.py", "stix2validator/validator.py" ]
[ "stix2validator/schemas", "stix2validator/output.py", "stix2validator/__init__.py", "stix2validator/scripts/stix2_validator.py", "stix2validator/validator.py" ]
numba__numba-3151
5215237f67497c77d85642ee917b1553560bc194
2018-07-23 21:43:16
e74336755bbba68d800985bd470fa35b9d27d3d3
diff --git a/docs/source/reference/envvars.rst b/docs/source/reference/envvars.rst index 3041af0f2..45e7cdf70 100644 --- a/docs/source/reference/envvars.rst +++ b/docs/source/reference/envvars.rst @@ -235,6 +235,20 @@ Compilation options portable code (portable within the same architecture and OS), simply set ``NUMBA_CPU_NAME=generic``. +.. envvar:: NUMBA_FUNCTION_CACHE_SIZE + + Override the size of the function cache for retaining recently + deserialized functions in memory. In systems like + `Dask <http://dask.pydata.org>`_, it is common for functions to be deserialized + multiple times. Numba will cache functions as long as there is a + reference somewhere in the interpreter. This cache size variable controls + how many functions that are no longer referenced will also be retained, + just in case they show up in the future. The implementation of this is + not a true LRU, but the large size of the cache should be sufficient for + most situations. + + *Default value:* 128 + GPU support ----------- diff --git a/numba/config.py b/numba/config.py index defaaba98..b7f3a4e8d 100644 --- a/numba/config.py +++ b/numba/config.py @@ -152,6 +152,10 @@ class _EnvReloader(object): # Enable debugging of front-end operation (up to and including IR generation) DEBUG_FRONTEND = _readenv("NUMBA_DEBUG_FRONTEND", int, 0) + # How many recently deserialized functions to retain regardless + # of external references + FUNCTION_CACHE_SIZE = _readenv("NUMBA_FUNCTION_CACHE_SIZE", int, 128) + # Enable logging of cache operation DEBUG_CACHE = _readenv("NUMBA_DEBUG_CACHE", int, DEBUG) diff --git a/numba/dispatcher.py b/numba/dispatcher.py index 471ea592b..58f52e4c4 100644 --- a/numba/dispatcher.py +++ b/numba/dispatcher.py @@ -514,6 +514,9 @@ class Dispatcher(_DispatcherBase): } # A {uuid -> instance} mapping, for deserialization _memo = weakref.WeakValueDictionary() + # hold refs to last N functions deserialized, retaining them in _memo + # regardless of whether there is another reference + 
_recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE) __uuid = None __numba__ = 'py_func' @@ -624,6 +627,7 @@ class Dispatcher(_DispatcherBase): assert self.__uuid is None self.__uuid = u self._memo[u] = self + self._recent.append(self) def compile(self, sig): if not self._can_compile: diff --git a/numba/extending.py b/numba/extending.py index 623dc8138..72451f20c 100644 --- a/numba/extending.py +++ b/numba/extending.py @@ -2,8 +2,9 @@ import inspect import uuid import weakref +import collections -from numba import types +from numba import types, config # Exported symbols from .typing.typeof import typeof_impl @@ -216,6 +217,10 @@ class _Intrinsic(object): Dummy callable for intrinsic """ _memo = weakref.WeakValueDictionary() + # hold refs to last N functions deserialized, retaining them in _memo + # regardless of whether there is another reference + _recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE) + __uuid = None def __init__(self, name, defn, support_literals=False): @@ -241,6 +246,7 @@ class _Intrinsic(object): assert self.__uuid is None self.__uuid = u self._memo[u] = self + self._recent.append(self) def _register(self): from .typing.templates import make_intrinsic_template, infer_global
Slow performance on repeated deserialization I'm running into performance problems when I deserialize a numba function many times within the same process. I have a small example here: ### Process 1 ```python import numba @numba.njit def f(x): total = 0 for i in range(len(x)): total += x[i] return total import cloudpickle cloudpickle.dumps(f) ``` ``` b'\x80\x04\x95\xa8\x01\x00\x00\x00\x00\x00\x00\x8c\x0fnumba.serialize\x94\x8c\x12_rebuild_reduction\x94\x93\x94(\x8c\x16numba.targets.registry\x94\x8c\rCPUDispatcher\x94\x93\x94\x8c$64ebbc90-6b3a-11e8-963f-49110aa33909\x94(K\x04C\x043\r\r\n\x94C\xa4\xe3\x01\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s*\x00\x00\x00d\x01}\x01x t\x00t\x01|\x00\x83\x01\x83\x01D\x00]\x10}\x02|\x01|\x00|\x02\x19\x007\x00}\x01q\x12W\x00|\x01S\x00)\x02N\xe9\x00\x00\x00\x00)\x02\xda\x05range\xda\x03len)\x03\xda\x01x\xda\x05total\xda\x01i\xa9\x00r\x07\x00\x00\x00\xfa\x1e<ipython-input-2-359c566f9f92>\xda\x01f\x01\x00\x00\x00s\x08\x00\x00\x00\x00\x02\x04\x01\x12\x01\x10\x01\x94\x87\x94}\x94(\x8c\x05range\x94\x8c\x08builtins\x94\x8c\x05range\x94\x93\x94\x8c\x03len\x94\x8c\x08builtins\x94\x8c\x03len\x94\x93\x94\x8c\x08__name__\x94\x8c\x08__main__\x94u\x8c\x01f\x94Nt\x94}\x94}\x94\x8c\x08nopython\x94\x88s\x8c\x06direct\x94\x88]\x94t\x94R\x94.' 
``` ### Process 2 ```python import pickle import numpy as np x = np.random.random(1000) b = b'\x80\x04\x95\xa8\x01\x00\x00\x00\x00\x00\x00\x8c\x0fnumba.serialize\x94\x8c\x12_rebuild_reduction\x94\x93\x94(\x8c\x16numba.targets.registry\x94\x8c\rCPUDispatcher\x94\x93\x94\x8c$64ebbc90-6b3a-11e8-963f-49110aa33909\x94(K\x04C\x043\r\r\n\x94C\xa4\xe3\x01\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s*\x00\x00\x00d\x01}\x01x t\x00t\x01|\x00\x83\x01\x83\x01D\x00]\x10}\x02|\x01|\x00|\x02\x19\x007\x00}\x01q\x12W\x00|\x01S\x00)\x02N\xe9\x00\x00\x00\x00)\x02\xda\x05range\xda\x03len)\x03\xda\x01x\xda\x05total\xda\x01i\xa9\x00r\x07\x00\x00\x00\xfa\x1e<ipython-input-2-359c566f9f92>\xda\x01f\x01\x00\x00\x00s\x08\x00\x00\x00\x00\x02\x04\x01\x12\x01\x10\x01\x94\x87\x94}\x94(\x8c\x05range\x94\x8c\x08builtins\x94\x8c\x05range\x94\x93\x94\x8c\x03len\x94\x8c\x08builtins\x94\x8c\x03len\x94\x93\x94\x8c\x08__name__\x94\x8c\x08__main__\x94u\x8c\x01f\x94Nt\x94}\x94}\x94\x8c\x08nopython\x94\x88s\x8c\x06direct\x94\x88]\x94t\x94R\x94.' ``` We see that repeated calls of deserializing and then calling the function remain somewhat slow at 60ms per call. 
```python In [5]: %time pickle.loads(b)(x) CPU times: user 255 ms, sys: 23.8 ms, total: 279 ms Wall time: 281 ms Out[5]: 515.6796588752262 In [6]: %time pickle.loads(b)(x) CPU times: user 63.4 ms, sys: 3.84 ms, total: 67.2 ms Wall time: 66.2 ms Out[6]: 515.6796588752262 In [7]: %time pickle.loads(b)(x) CPU times: user 68.3 ms, sys: 0 ns, total: 68.3 ms Wall time: 67.1 ms Out[7]: 515.6796588752262 ``` The cost of deserializing is relatively low ```python In [8]: %time f = pickle.loads(b) CPU times: user 619 µs, sys: 66 µs, total: 685 µs Wall time: 696 µs ``` And if we call this function many times then things are ok ```python In [9]: %time f(x) CPU times: user 65.9 ms, sys: 3.42 ms, total: 69.3 ms Wall time: 68.6 ms Out[9]: 515.6796588752262 In [10]: %time f(x) CPU times: user 25 µs, sys: 2 µs, total: 27 µs Wall time: 34.6 µs Out[10]: 515.6796588752262 In [11]: %time f(x) CPU times: user 36 µs, sys: 3 µs, total: 39 µs Wall time: 52.7 µs Out[11]: 515.6796588752262 ``` So my guess is that we should be deduplicating things in some way. Is this in scope for Numba to resolve or is this something that I should be handling on my end?
numba/numba
diff --git a/numba/tests/test_dispatcher.py b/numba/tests/test_dispatcher.py index 38b8c862d..38d657a72 100644 --- a/numba/tests/test_dispatcher.py +++ b/numba/tests/test_dispatcher.py @@ -9,6 +9,8 @@ import sys import threading import warnings import inspect +import pickle +import weakref try: import jinja2 @@ -26,6 +28,7 @@ from .support import (TestCase, tag, temp_directory, import_dynamic, override_env_config, capture_cache_log, captured_stdout) from numba.targets import codegen from numba.caching import _UserWideCacheLocator +from numba.dispatcher import Dispatcher import llvmlite.binding as ll @@ -337,6 +340,50 @@ class TestDispatcher(BaseTest): [cr] = bar.overloads.values() self.assertEqual(len(cr.lifted), 1) + def test_serialization(self): + """ + Test serialization of Dispatcher objects + """ + @jit(nopython=True) + def foo(x): + return x + 1 + + self.assertEqual(foo(1), 2) + + # get serialization memo + memo = Dispatcher._memo + Dispatcher._recent.clear() + memo_size = len(memo) + + # pickle foo and check memo size + serialized_foo = pickle.dumps(foo) + # increases the memo size + self.assertEqual(memo_size + 1, len(memo)) + + # unpickle + foo_rebuilt = pickle.loads(serialized_foo) + self.assertEqual(memo_size + 1, len(memo)) + + self.assertIs(foo, foo_rebuilt) + + # do we get the same object even if we delete all the explict references? 
+ id_orig = id(foo_rebuilt) + del foo + del foo_rebuilt + self.assertEqual(memo_size + 1, len(memo)) + new_foo = pickle.loads(serialized_foo) + self.assertEqual(id_orig, id(new_foo)) + + # now clear the recent cache + ref = weakref.ref(new_foo) + del new_foo + Dispatcher._recent.clear() + self.assertEqual(memo_size, len(memo)) + + # show that deserializing creates a new object + newer_foo = pickle.loads(serialized_foo) + self.assertIs(ref(), None) + class TestSignatureHandling(BaseTest): """ diff --git a/numba/tests/test_extending.py b/numba/tests/test_extending.py index 3b377ce1b..565726ccb 100644 --- a/numba/tests/test_extending.py +++ b/numba/tests/test_extending.py @@ -676,7 +676,13 @@ class TestIntrinsic(TestCase): memo_size += 1 self.assertEqual(memo_size, len(memo)) del original # remove original before unpickling - # by deleting, the memo entry is removed + + # by deleting, the memo entry is NOT removed due to recent + # function queue + self.assertEqual(memo_size, len(memo)) + + # Manually force clear of _recent queue + _Intrinsic._recent.clear() memo_size -= 1 self.assertEqual(memo_size, len(memo))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 4 }
0.40
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc g++" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi exceptiongroup==1.2.2 importlib-metadata==6.7.0 iniconfig==2.0.0 llvmlite==0.39.1 -e git+https://github.com/numba/numba.git@5215237f67497c77d85642ee917b1553560bc194#egg=numba numpy==1.21.6 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.7.1 zipp==3.15.0
name: numba channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argparse==1.4.0 - exceptiongroup==1.2.2 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - llvmlite==0.39.1 - numpy==1.21.6 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - tomli==2.0.1 - typing-extensions==4.7.1 - zipp==3.15.0 prefix: /opt/conda/envs/numba
[ "numba/tests/test_extending.py::TestIntrinsic::test_deserialization" ]
[ "numba/tests/test_dispatcher.py::TestDispatcher::test_ambiguous_new_version", "numba/tests/test_dispatcher.py::TestDispatcher::test_coerce_input_types", "numba/tests/test_dispatcher.py::TestDispatcher::test_disabled_compilation", "numba/tests/test_dispatcher.py::TestDispatcher::test_disabled_compilation_nested_call", "numba/tests/test_dispatcher.py::TestDispatcher::test_disabled_compilation_through_list", "numba/tests/test_dispatcher.py::TestDispatcher::test_dyn_pyfunc", "numba/tests/test_dispatcher.py::TestDispatcher::test_explicit_signatures", "numba/tests/test_dispatcher.py::TestDispatcher::test_fingerprint_failure", "numba/tests/test_dispatcher.py::TestDispatcher::test_lock", "numba/tests/test_dispatcher.py::TestDispatcher::test_matching_error_message", "numba/tests/test_dispatcher.py::TestDispatcher::test_no_argument", "numba/tests/test_dispatcher.py::TestDispatcher::test_serialization", "numba/tests/test_dispatcher.py::TestDispatcher::test_signature_mismatch", "numba/tests/test_dispatcher.py::TestSignatureHandling::test_default_args", "numba/tests/test_dispatcher.py::TestSignatureHandling::test_named_args", "numba/tests/test_dispatcher.py::TestSignatureHandling::test_star_args", "numba/tests/test_dispatcher.py::TestSignatureHandlingObjectMode::test_default_args", "numba/tests/test_dispatcher.py::TestSignatureHandlingObjectMode::test_named_args", "numba/tests/test_dispatcher.py::TestSignatureHandlingObjectMode::test_star_args", "numba/tests/test_dispatcher.py::TestGeneratedDispatcher::test_generated", "numba/tests/test_dispatcher.py::TestGeneratedDispatcher::test_signature_errors", "numba/tests/test_dispatcher.py::TestDispatcherMethods::test_inspect_asm", "numba/tests/test_dispatcher.py::TestDispatcherMethods::test_inspect_cfg", "numba/tests/test_dispatcher.py::TestDispatcherMethods::test_inspect_cfg_with_python_wrapper", "numba/tests/test_dispatcher.py::TestDispatcherMethods::test_inspect_llvm", 
"numba/tests/test_dispatcher.py::TestDispatcherMethods::test_inspect_types", "numba/tests/test_dispatcher.py::TestDispatcherMethods::test_issue_with_array_layout_conflict", "numba/tests/test_dispatcher.py::TestDispatcherMethods::test_recompile", "numba/tests/test_dispatcher.py::TestDispatcherMethods::test_recompile_signatures", "numba/tests/test_dispatcher.py::TestCache::test_big_array", "numba/tests/test_dispatcher.py::TestCache::test_cache_invalidate", "numba/tests/test_dispatcher.py::TestCache::test_cache_reuse", "numba/tests/test_dispatcher.py::TestCache::test_caching", "numba/tests/test_dispatcher.py::TestCache::test_caching_nrt_pruned", "numba/tests/test_dispatcher.py::TestCache::test_closure", "numba/tests/test_dispatcher.py::TestCache::test_ctypes", "numba/tests/test_dispatcher.py::TestCache::test_inner_then_outer", "numba/tests/test_dispatcher.py::TestCache::test_looplifted", "numba/tests/test_dispatcher.py::TestCache::test_no_caching", "numba/tests/test_dispatcher.py::TestCache::test_outer_then_inner", "numba/tests/test_dispatcher.py::TestCache::test_recompile", "numba/tests/test_dispatcher.py::TestCache::test_same_names", "numba/tests/test_dispatcher.py::TestCacheWithCpuSetting::test_user_set_cpu_features", "numba/tests/test_dispatcher.py::TestCacheWithCpuSetting::test_user_set_cpu_name", "numba/tests/test_dispatcher.py::TestMultiprocessCache::test_multiprocessing", "numba/tests/test_dispatcher.py::TestCacheFileCollision::test_file_location", "numba/tests/test_dispatcher.py::TestCacheFileCollision::test_no_collision", "numba/tests/test_dispatcher.py::TestDispatcherFunctionBoundaries::test_dispatcher_as_arg_usecase", "numba/tests/test_dispatcher.py::TestDispatcherFunctionBoundaries::test_dispatcher_cannot_return_to_python", "numba/tests/test_dispatcher.py::TestDispatcherFunctionBoundaries::test_dispatcher_in_sequence_arg", "numba/tests/test_dispatcher.py::TestDispatcherFunctionBoundaries::test_pass_dispatcher_as_arg", 
"numba/tests/test_dispatcher.py::TestBoxingDefaultError::test_box_runtime_error", "numba/tests/test_dispatcher.py::TestBoxingDefaultError::test_unbox_runtime_error", "numba/tests/test_extending.py::TestLowLevelExtending::test_cast_mydummy", "numba/tests/test_extending.py::TestLowLevelExtending::test_func1", "numba/tests/test_extending.py::TestLowLevelExtending::test_func1_isolated", "numba/tests/test_extending.py::TestPandasLike::test_index_get_data", "numba/tests/test_extending.py::TestPandasLike::test_index_getitem", "numba/tests/test_extending.py::TestPandasLike::test_index_is_monotonic", "numba/tests/test_extending.py::TestPandasLike::test_index_len", "numba/tests/test_extending.py::TestPandasLike::test_index_ufunc", "numba/tests/test_extending.py::TestPandasLike::test_series_clip", "numba/tests/test_extending.py::TestPandasLike::test_series_constructor", "numba/tests/test_extending.py::TestPandasLike::test_series_get_index", "numba/tests/test_extending.py::TestPandasLike::test_series_len", "numba/tests/test_extending.py::TestPandasLike::test_series_ufunc", "numba/tests/test_extending.py::TestHighLevelExtending::test_len", "numba/tests/test_extending.py::TestHighLevelExtending::test_no_cpython_wrapper", "numba/tests/test_extending.py::TestHighLevelExtending::test_print", "numba/tests/test_extending.py::TestHighLevelExtending::test_where", "numba/tests/test_extending.py::TestOverloadMethodCaching::test_caching_overload_method", "numba/tests/test_extending.py::TestIntrinsic::test_ll_pointer_cast", "numba/tests/test_extending.py::TestIntrinsic::test_serialization", "numba/tests/test_extending.py::TestRegisterJitable::test_flags_no_nrt", "numba/tests/test_extending.py::TestRegisterJitable::test_no_flags" ]
[ "numba/tests/test_dispatcher.py::TestCache::test_frozen", "numba/tests/test_extending.py::TestImportCythonFunction::test_missing_module" ]
[]
BSD 2-Clause "Simplified" License
2,817
[ "numba/config.py", "numba/dispatcher.py", "docs/source/reference/envvars.rst", "numba/extending.py" ]
[ "numba/config.py", "numba/dispatcher.py", "docs/source/reference/envvars.rst", "numba/extending.py" ]
pypa__setuptools_scm-287
6f2703342a7060c4673117e54d9a0c133a07211e
2018-07-24 07:11:27
3ae1cad231545abfeedea9aaa7405e15fb28d95c
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ea83b4e..ad7ca4f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,9 @@ +v3.0.3 +====== + +* fix #286 - duo an oversight a helper functio nwas returning a generator instead of a list + + v3.0.2 ====== diff --git a/src/setuptools_scm/version.py b/src/setuptools_scm/version.py index 47ad6d3..b2ad2f7 100644 --- a/src/setuptools_scm/version.py +++ b/src/setuptools_scm/version.py @@ -30,12 +30,12 @@ def _parse_version_tag(tag, config): if len(match.groups()) == 1: key = 1 else: - key = 'version' - + key = "version" + result = { - 'version': match.group(key), - 'prefix': match.group(0)[:match.start(key)], - 'suffix': match.group(0)[match.end(key):], + "version": match.group(key), + "prefix": match.group(0)[:match.start(key)], + "suffix": match.group(0)[match.end(key):], } trace("tag '%s' parsed to %s" % (tag, result)) @@ -88,20 +88,21 @@ def tag_to_version(tag, config=None): config = Configuration() tagdict = _parse_version_tag(tag, config) - if not isinstance(tagdict, dict) or not tagdict.get('version', None): + if not isinstance(tagdict, dict) or not tagdict.get("version", None): warnings.warn("tag %r no version found" % (tag,)) return None - version = tagdict['version'] + version = tagdict["version"] trace("version pre parse", version) - if tagdict.get('suffix', ''): - warnings.warn("tag %r will be stripped of its suffix '%s'" % (tag, tagdict['suffix'])) + if tagdict.get("suffix", ""): + warnings.warn( + "tag %r will be stripped of its suffix '%s'" % (tag, tagdict["suffix"]) + ) if VERSION_CLASS is not None: version = pkg_parse_version(version) trace("version", repr(version)) - return version @@ -111,7 +112,12 @@ def tags_to_versions(tags, config=None): :param tags: an iterable of tags :param config: optional configuration object """ - return filter(None, map(lambda tag: tag_to_version(tag, config=config), tags)) + result = [] + for tag in tags: + tag = tag_to_version(tag, config=config) + if tag: + 
result.append(tag) + return result class ScmVersion(object): @@ -176,9 +182,14 @@ def _parse_tag(tag, preformatted, config): return tag -def meta(tag, distance=None, dirty=False, node=None, preformatted=False, config=None, **kw): +def meta( + tag, distance=None, dirty=False, node=None, preformatted=False, config=None, **kw +): if not config: - warnings.warn("meta invoked without explicit configuration, will use defaults where required.") + warnings.warn( + "meta invoked without explicit configuration," + " will use defaults where required." + ) parsed_version = _parse_tag(tag, preformatted, config) trace("version", tag, "->", parsed_version) assert parsed_version is not None, "cant parse version %s" % tag
in version 3 tags_to_versions returns a iterator instead of a list I get this issue when using `setuptools>=3` and `<=3.0.2` ``` (venv) macbeth $ pip install --no-binary :all: cheroot Looking in indexes: http://artifactory.factset.com/artifactory/api/pypi/python/simple/ Collecting cheroot Downloading http://artifactory.factset.com/artifactory/api/pypi/python/packages/a3/fe/60797128186577348abc612fdd1011e737d7f01137c0c927dba489360fc3/cheroot-6.3.3.tar.gz (73kB) 100% |████████████████████████████████| 81kB 65.5MB/s Complete output from command python setup.py egg_info: Traceback (most recent call last): File "<string>", line 1, in <module> File "/tmp/pip-install-otm5evwb/cheroot/setup.py", line 113, in <module> setuptools.setup(**params) File "/REDACTED_DIR/venv/lib/python3.6/site-packages/setuptools/__init__.py", line 131, in setup return distutils.core.setup(**attrs) File "/home/user/macbeth/.local/share/pyenv/versions/3.6.4/lib/python3.6/distutils/core.py", line 108, in setup _setup_distribution = dist = klass(attrs) File "/REDACTED_DIR/venv/lib/python3.6/site-packages/setuptools/dist.py", line 370, in __init__ k: v for k, v in attrs.items() File "/home/user/macbeth/.local/share/pyenv/versions/3.6.4/lib/python3.6/distutils/dist.py", line 281, in __init__ self.finalize_options() File "/REDACTED_DIR/venv/lib/python3.6/site-packages/setuptools/dist.py", line 529, in finalize_options ep.load()(self, ep.name, value) File "/tmp/pip-install-otm5evwb/cheroot/.eggs/setuptools_scm-3.0.2-py3.6.egg/setuptools_scm/integration.py", line 23, in version_keyword dist.metadata.version = get_version(**value) File "/tmp/pip-install-otm5evwb/cheroot/.eggs/setuptools_scm-3.0.2-py3.6.egg/setuptools_scm/__init__.py", line 135, in get_version parsed_version = _do_parse(config) File "/tmp/pip-install-otm5evwb/cheroot/.eggs/setuptools_scm-3.0.2-py3.6.egg/setuptools_scm/__init__.py", line 88, in _do_parse config, "setuptools_scm.parse_scm" File 
"/tmp/pip-install-otm5evwb/cheroot/.eggs/setuptools_scm-3.0.2-py3.6.egg/setuptools_scm/__init__.py", line 45, in _version_from_entrypoint version = _call_entrypoint_fn(config, ep.load()) File "/tmp/pip-install-otm5evwb/cheroot/.eggs/setuptools_scm-3.0.2-py3.6.egg/setuptools_scm/__init__.py", line 40, in _call_entrypoint_fn return fn(config.absolute_root) File "/tmp/pip-install-otm5evwb/cheroot/.eggs/setuptools_scm_git_archive-1.0-py3.6.egg/setuptools_scm_git_archive/__init__.py", line 21, in parse return archival_to_version(data) File "/tmp/pip-install-otm5evwb/cheroot/.eggs/setuptools_scm_git_archive-1.0-py3.6.egg/setuptools_scm_git_archive/__init__.py", line 15, in archival_to_version return meta(versions[0]) TypeError: 'filter' object is not subscriptable ---------------------------------------- Command "python setup.py egg_info" failed with error code 1 in /tmp/pip-install-otm5evwb/cheroot/ (venv) macbeth $ pip freeze --all pip==18.0 setuptools==40.0.0 (venv) macbeth $ python --version Python 3.6.4 ```
pypa/setuptools_scm
diff --git a/testing/test_version.py b/testing/test_version.py index 4de545c..31f33f7 100644 --- a/testing/test_version.py +++ b/testing/test_version.py @@ -1,6 +1,6 @@ import pytest from setuptools_scm.config import Configuration -from setuptools_scm.version import meta, simplified_semver_version +from setuptools_scm.version import meta, simplified_semver_version, tags_to_versions @pytest.mark.parametrize( @@ -49,6 +49,13 @@ def test_next_semver(version, expected_next): ], ) def test_tag_regex1(tag, expected): - Configuration().tag_regex = r'^(?P<prefix>v)?(?P<version>[^\+]+)(?P<suffix>.*)?$' + Configuration().tag_regex = r"^(?P<prefix>v)?(?P<version>[^\+]+)(?P<suffix>.*)?$" result = meta(tag) assert result.tag.public == expected + + [email protected]("https://github.com/pypa/setuptools_scm/issues/286") +def test_tags_to_versions(): + config = Configuration() + versions = tags_to_versions(["1", "2", "3"], config=config) + assert isinstance(versions, list) # enable subscription
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
3.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 -e git+https://github.com/pypa/setuptools_scm.git@6f2703342a7060c4673117e54d9a0c133a07211e#egg=setuptools_scm tomli==2.2.1
name: setuptools_scm channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/setuptools_scm
[ "testing/test_version.py::test_tags_to_versions" ]
[]
[ "testing/test_version.py::test_next_semver[exact]", "testing/test_version.py::test_next_semver[short_tag]", "testing/test_version.py::test_next_semver[normal_branch]", "testing/test_version.py::test_next_semver[normal_branch_short_tag]", "testing/test_version.py::test_next_semver[feature_branch]", "testing/test_version.py::test_next_semver[feature_branch_short_tag]", "testing/test_version.py::test_next_semver[feature_in_branch]", "testing/test_version.py::test_tag_regex1[v1.0.0-1.0.0]", "testing/test_version.py::test_tag_regex1[v1.0.0-rc.1-1.0.0rc1]", "testing/test_version.py::test_tag_regex1[v1.0.0-rc.1+-25259o4382757gjurh54-1.0.0rc1]" ]
[]
MIT License
2,819
[ "CHANGELOG.rst", "src/setuptools_scm/version.py" ]
[ "CHANGELOG.rst", "src/setuptools_scm/version.py" ]
G-Node__python-odml-296
a75d1db5ddb02cf7414b17ed41ef97a80c6245fb
2018-07-24 14:15:07
1083db44a4e2fc29f5730fa74ab265dece6facd1
diff --git a/.travis.yml b/.travis.yml index 6c8e347..9b80df5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,10 +15,6 @@ matrix: - os: linux python: "3.6" - - os: osx - language: generic - env: - - OSXENV=3.5.0 - os: osx language: generic env: diff --git a/odml/tools/version_converter.py b/odml/tools/version_converter.py index 47539cb..86e4011 100644 --- a/odml/tools/version_converter.py +++ b/odml/tools/version_converter.py @@ -52,6 +52,9 @@ class VersionConverter(object): if elem in doc: doc = doc.replace(elem, val) + # Make sure encoding is present for the xml parser + doc = doc.encode('utf-8') + # Make pretty print available by resetting format parser = ET.XMLParser(remove_blank_text=True) tree = ET.ElementTree(ET.fromstring(doc, parser)) @@ -300,7 +303,7 @@ class VersionConverter(object): if value.text: if main_val.text: - main_val.text += ", " + value.text.strip() + main_val.text += "," + value.text.strip() multiple_values = True else: main_val.text = value.text.strip() diff --git a/odml/tools/xmlparser.py b/odml/tools/xmlparser.py index 1a4f240..4a04e23 100644 --- a/odml/tools/xmlparser.py +++ b/odml/tools/xmlparser.py @@ -27,11 +27,15 @@ except NameError: def to_csv(val): - unicode_values = list(map(unicode, val)) + # Make sure all individual values do not contain + # leading or trailing whitespaces. + unicode_values = list(map(unicode.strip, map(unicode, val))) stream = StringIO() writer = csv.writer(stream, dialect="excel") writer.writerow(unicode_values) - csv_string = stream.getvalue().strip() + # Strip any csv.writer added carriage return line feeds + # and double quotes before saving. 
+ csv_string = stream.getvalue().strip().strip('"') if len(unicode_values) > 1: csv_string = "[" + csv_string + "]" return csv_string @@ -40,8 +44,14 @@ def to_csv(val): def from_csv(value_string): if not value_string: return [] - if value_string[0] == "[": + if value_string[0] == "[" and value_string[-1] == "]": value_string = value_string[1:-1] + else: + # This is a single string entry, any comma contained + # is part of the value and must not be used to + # split up the string. + return [value_string] + if not value_string: return [] stream = StringIO(value_string)
VersionConverter encoding problem With Python3, the VersionConverter encounters a `ValueError: Unicode strings with encoding declaration are not supported` when the xml tag of the to be converted file contains the `encoding="UTF-8"` attribute.
G-Node/python-odml
diff --git a/test/test_version_converter.py b/test/test_version_converter.py index 48b248e..29c28ab 100644 --- a/test/test_version_converter.py +++ b/test/test_version_converter.py @@ -130,7 +130,7 @@ class TestVersionConverter(unittest.TestCase): self.assertEqual(val_elems[0].find("unit"), None) self.assertEqual(val_elems[0].find("type"), None) self.assertEqual(val_elems[0].find("uncertainty"), None) - self.assertEqual(val_elems[0].text, "[0, 45]") + self.assertEqual(val_elems[0].text, "[0,45]") self.assertEqual(prop.find("unit").text, "deg") self.assertEqual(len(prop.findall("unit")), 1) self.assertEqual(prop.find("type").text, "int") @@ -481,6 +481,37 @@ class TestVersionConverter(unittest.TestCase): </value> </property> + <property> + <value>Single, string, value, with, many, commata.<type>string</type></value> + <name>testSingleString</name> + </property> + + <property> + <value>A<type>string</type></value> + <value>B<type>string</type></value> + <value>C<type>string</type></value> + <name>testStringList</name> + </property> + + <property> + <value> Single string value with wrapping whitespace <type>string</type></value> + <name>testStringWhiteSpace</name> + </property> + + <property> + <value> Multiple Strings <type>string</type></value> + <value> with wrapping <type>string</type></value> + <value> Whitespace <type>string</type></value> + <name>testStringListWhiteSpace</name> + </property> + + <property> + <value> 1 <type>int</type></value> + <value> 2 <type>int</type></value> + <value> 3 <type>int</type></value> + <name>testIntListWhiteSpace</name> + </property> + </section> </odML> """ @@ -490,7 +521,7 @@ class TestVersionConverter(unittest.TestCase): conv_doc = vc._convert(vc._parse_xml()) root = conv_doc.getroot() sec = root.find("section") - self.assertEqual(len(sec), 9) + self.assertEqual(len(sec), 14) # Test single value export prop = sec.findall("property")[0] @@ -500,7 +531,7 @@ class TestVersionConverter(unittest.TestCase): # Test multiple value 
export prop = sec.findall("property")[1] self.assertEqual(len(prop), 2) - self.assertEqual(prop.find("value").text, "[1, 2, 3]") + self.assertEqual(prop.find("value").text, "[1,2,3]") # Test empty value export prop = sec.findall("property")[2] @@ -521,7 +552,7 @@ class TestVersionConverter(unittest.TestCase): # Test valid multiple Value tag export prop = sec.findall("property")[4] self.assertEqual(len(prop), 7) - self.assertEqual(prop.find("value").text, "[0.1, 0.2, 3]") + self.assertEqual(prop.find("value").text, "[0.1,0.2,3]") self.assertEqual(prop.find("type").text, "float") self.assertEqual(prop.find("uncertainty").text, "0.05") self.assertEqual(prop.find("unit").text, "mV") @@ -541,6 +572,35 @@ class TestVersionConverter(unittest.TestCase): self.assertEqual(prop.find("name").text, "Unsupported binary value dtype replace") self.assertEqual(prop.find("type").text, "text") + # Test single string value with commata + prop = sec.findall("property")[8] + self.assertEqual(prop.find("name").text, "testSingleString") + self.assertEqual(prop.find("value").text, + "Single, string, value, with, many, commata.") + + # Test string list import + prop = sec.findall("property")[9] + self.assertEqual(prop.find("name").text, "testStringList") + self.assertEqual(prop.find("value").text, "[A,B,C]") + + # Test single string values wrapping whitespace removal + prop = sec.findall("property")[10] + self.assertEqual(prop.find("name").text, "testStringWhiteSpace") + self.assertEqual(prop.find("value").text, + "Single string value with wrapping whitespace") + + # Test multiple string values with wrapping whitespace removal + prop = sec.findall("property")[11] + self.assertEqual(prop.find("name").text, "testStringListWhiteSpace") + self.assertEqual(prop.find("value").text, + "[Multiple Strings,with wrapping,Whitespace]") + + # Test multiple int values with wrapping whitespaces + prop = sec.findall("property")[12] + self.assertEqual(prop.find("name").text, "testIntListWhiteSpace") + 
self.assertEqual(prop.find("type").text, "int") + self.assertEqual(prop.find("value").text, "[1,2,3]") + def test_parse_dict_document(self): # Test appending tags; not appending empty sections doc_dict = {'Document': {'author': 'HPL', 'sections': []}}
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 3 }
1.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y libxml2-dev libxslt1-dev lib32z1-dev" ], "python": "3.5", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 isodate==0.6.1 lxml==5.3.1 -e git+https://github.com/G-Node/python-odml.git@a75d1db5ddb02cf7414b17ed41ef97a80c6245fb#egg=odML packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 PyYAML==3.12 rdflib==5.0.0 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: python-odml channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isodate==0.6.1 - lxml==5.3.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pyyaml==3.12 - rdflib==5.0.0 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/python-odml
[ "test/test_version_converter.py::TestVersionConverter::test_convert_odml_file", "test/test_version_converter.py::TestVersionConverter::test_convert_odml_file_value" ]
[]
[ "test/test_version_converter.py::TestVersionConverter::test_convert_json_file", "test/test_version_converter.py::TestVersionConverter::test_convert_odml_file_document", "test/test_version_converter.py::TestVersionConverter::test_convert_odml_file_property", "test/test_version_converter.py::TestVersionConverter::test_convert_odml_file_section", "test/test_version_converter.py::TestVersionConverter::test_convert_xml_file", "test/test_version_converter.py::TestVersionConverter::test_convert_yaml_file", "test/test_version_converter.py::TestVersionConverter::test_handle_include", "test/test_version_converter.py::TestVersionConverter::test_handle_repository", "test/test_version_converter.py::TestVersionConverter::test_parse_dict_document", "test/test_version_converter.py::TestVersionConverter::test_parse_dict_properties", "test/test_version_converter.py::TestVersionConverter::test_parse_dict_sections", "test/test_version_converter.py::TestVersionConverter::test_parse_dict_values", "test/test_version_converter.py::TestVersionConverter::test_replace_same_name_entites", "test/test_version_converter.py::TestVersionConverter::test_write_to_file" ]
[]
BSD 4-Clause "Original" or "Old" License
2,821
[ ".travis.yml", "odml/tools/xmlparser.py", "odml/tools/version_converter.py" ]
[ ".travis.yml", "odml/tools/xmlparser.py", "odml/tools/version_converter.py" ]
dask__dask-3810
3dddd1148d1050ea7eca16a9c6dd2bc187adb02a
2018-07-24 21:44:03
b8816eb498bfe4a24ace89484b2df2af3d181bfe
mrocklin: @crusaderky do you think that this is ready to merge? crusaderky: @mrocklin yes, I was just waiting for CI to give green light
diff --git a/dask/array/chunk.py b/dask/array/chunk.py index 498edfdf7..a36a4de7f 100644 --- a/dask/array/chunk.py +++ b/dask/array/chunk.py @@ -9,6 +9,7 @@ import numpy as np from . import numpy_compat as npcompat from ..compatibility import getargspec +from ..core import flatten from ..utils import ignoring try: @@ -233,6 +234,7 @@ def argtopk(a_plus_idx, k, axis, keepdims): axis = axis[0] if isinstance(a_plus_idx, list): + a_plus_idx = list(flatten(a_plus_idx)) a = np.concatenate([ai for ai, _ in a_plus_idx], axis) idx = np.concatenate([broadcast_to(idxi, ai.shape) for ai, idxi in a_plus_idx], axis)
Regression in 0.18.2: argtopk(split_every=2) broken ``` import dask.array as da a = da.from_array([[1, 4, 2, 5, 3], [7, 1, 8, 0, 2]], chunks=1) a.argtopk(-3, split_every=2).compute() AttributeError: 'tuple' object has no attribute 'shape' ``` I'm still in the middle of investigating how it happened and, most importantly, why the regression tests didn't spot it.
dask/dask
diff --git a/dask/array/tests/test_reductions.py b/dask/array/tests/test_reductions.py index c0e642847..83db8b57b 100644 --- a/dask/array/tests/test_reductions.py +++ b/dask/array/tests/test_reductions.py @@ -460,7 +460,26 @@ def test_topk_argtopk1(npfunc, daskfunc, split_every): daskfunc(b, -k, axis=3, split_every=split_every) -def test_topk_argtopk2(): [email protected]('npfunc,daskfunc', [ + (np.sort, da.topk), + (np.argsort, da.argtopk), +]) [email protected]('split_every', [None, 2, 3, 4]) [email protected]('chunksize', [1, 2, 3, 4, 5, 10]) +def test_topk_argtopk2(npfunc, daskfunc, split_every, chunksize): + """Fine test use cases when k is larger than chunk size""" + a = da.random.random((10, ), chunks=chunksize) + k = 5 + + # top 5 elements, sorted descending + assert_eq(npfunc(a)[-k:][::-1], + daskfunc(a, k, split_every=split_every)) + # bottom 5 elements, sorted ascending + assert_eq(npfunc(a)[:k], + daskfunc(a, -k, split_every=split_every)) + + +def test_topk_argtopk3(): a = da.random.random((10, 20, 30), chunks=(4, 8, 8)) # Support for deprecated API for topk @@ -468,5 +487,7 @@ def test_topk_argtopk2(): assert_eq(da.topk(a, 5), da.topk(5, a)) # As Array methods - assert_eq(a.topk(5, axis=1, split_every=2), da.topk(a, 5, axis=1, split_every=2)) - assert_eq(a.argtopk(5, axis=1, split_every=2), da.argtopk(a, 5, axis=1, split_every=2)) + assert_eq(a.topk(5, axis=1, split_every=2), + da.topk(a, 5, axis=1, split_every=2)) + assert_eq(a.argtopk(5, axis=1, split_every=2), + da.argtopk(a, 5, axis=1, split_every=2))
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.18
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[complete]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-xdist" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 click==8.0.4 cloudpickle==2.2.1 -e git+https://github.com/dask/dask.git@3dddd1148d1050ea7eca16a9c6dd2bc187adb02a#egg=dask distributed==1.28.1 execnet==1.9.0 HeapDict==1.0.1 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work locket==1.0.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work msgpack==1.0.5 numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pandas==1.1.5 partd==1.2.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==7.0.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 six==1.17.0 sortedcontainers==2.4.0 tblib==1.7.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work toolz==0.12.0 tornado==6.1 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zict==2.1.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: dask channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.0.4 - cloudpickle==2.2.1 - distributed==1.28.1 - execnet==1.9.0 - heapdict==1.0.1 - locket==1.0.0 - msgpack==1.0.5 - numpy==1.19.5 - pandas==1.1.5 - partd==1.2.0 - psutil==7.0.0 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - six==1.17.0 - sortedcontainers==2.4.0 - tblib==1.7.0 - toolz==0.12.0 - tornado==6.1 - zict==2.1.0 prefix: /opt/conda/envs/dask
[ "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-2-argsort-argtopk]" ]
[ "dask/array/tests/test_reductions.py::test_nan_object[nansum]", "dask/array/tests/test_reductions.py::test_nan_object[sum]", "dask/array/tests/test_reductions.py::test_nan_object[nanmin]", "dask/array/tests/test_reductions.py::test_nan_object[min]", "dask/array/tests/test_reductions.py::test_nan_object[nanmax]", "dask/array/tests/test_reductions.py::test_nan_object[max]" ]
[ "dask/array/tests/test_reductions.py::test_reductions_1D[f4]", "dask/array/tests/test_reductions.py::test_reductions_1D[i4]", "dask/array/tests/test_reductions.py::test_reduction_errors", "dask/array/tests/test_reductions.py::test_arg_reductions[argmin-argmin]", "dask/array/tests/test_reductions.py::test_arg_reductions[argmax-argmax]", "dask/array/tests/test_reductions.py::test_arg_reductions[_nanargmin-nanargmin]", "dask/array/tests/test_reductions.py::test_arg_reductions[_nanargmax-nanargmax]", "dask/array/tests/test_reductions.py::test_nanarg_reductions[_nanargmin-nanargmin]", "dask/array/tests/test_reductions.py::test_nanarg_reductions[_nanargmax-nanargmax]", "dask/array/tests/test_reductions.py::test_reductions_2D_nans", "dask/array/tests/test_reductions.py::test_moment", "dask/array/tests/test_reductions.py::test_reductions_with_negative_axes", "dask/array/tests/test_reductions.py::test_nan", "dask/array/tests/test_reductions.py::test_0d_array", "dask/array/tests/test_reductions.py::test_reduction_on_scalar", "dask/array/tests/test_reductions.py::test_reductions_with_empty_array", "dask/array/tests/test_reductions.py::test_tree_reduce_depth", "dask/array/tests/test_reductions.py::test_tree_reduce_set_options", "dask/array/tests/test_reductions.py::test_reduction_names", "dask/array/tests/test_reductions.py::test_array_reduction_out[sum]", "dask/array/tests/test_reductions.py::test_array_reduction_out[argmax]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[None-cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[None-cumprod]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[0-cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[0-cumprod]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[1-cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[1-cumprod]", 
"dask/array/tests/test_reductions.py::test_array_cumreduction_axis[-1-cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_axis[-1-cumprod]", "dask/array/tests/test_reductions.py::test_array_cumreduction_out[cumsum]", "dask/array/tests/test_reductions.py::test_array_cumreduction_out[cumprod]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[8-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk1[8-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[1-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[2-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-3-sort-topk]", 
"dask/array/tests/test_reductions.py::test_topk_argtopk2[3-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[3-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[4-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-3-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[5-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-None-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-None-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-2-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-2-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-3-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-3-argsort-argtopk]", 
"dask/array/tests/test_reductions.py::test_topk_argtopk2[10-4-sort-topk]", "dask/array/tests/test_reductions.py::test_topk_argtopk2[10-4-argsort-argtopk]", "dask/array/tests/test_reductions.py::test_topk_argtopk3" ]
[]
BSD 3-Clause "New" or "Revised" License
2,822
[ "dask/array/chunk.py" ]
[ "dask/array/chunk.py" ]
F5Networks__f5-common-python-1485
fb001e95cc16ddf84ba03c3193582c69cd8ce0df
2018-07-24 22:21:08
a97a2ef2abb9114a2152453671dee0ac2ae70d1f
jasonrahm: @drenout can you change your two append lines to this line instead: `self._meta_data['allowed_commands'].extend(['revoke', 'install'])`
diff --git a/f5/bigip/tm/sys/license.py b/f5/bigip/tm/sys/license.py index df150f9..c38d3d4 100644 --- a/f5/bigip/tm/sys/license.py +++ b/f5/bigip/tm/sys/license.py @@ -41,7 +41,7 @@ class License(UnnamedResource, CommandExecutionMixin): super(License, self).__init__(sys) self._meta_data['required_json_kind'] =\ "tm:sys:license:licensestats" - self._meta_data['allowed_commands'].append('revoke') + self._meta_data['allowed_commands'].extend(['revoke', 'install']) def exec_cmd(self, command, **kwargs): self._is_allowed_command(command)
Add license installation Add license installation command similar to the revoking the license: Code snippet for the revoking the license: ```python mgnt = ManagementRoot(bigip_mgnt, user, pass) mgnt.tm.sys.license.exec_cmd('revoke')` ``` Code snippet for the license installation: ```python mgnt = ManagementRoot(bigip_mgnt, user, pass) mgnt.tm.sys.license.exec_cmd('install', registrationKey=regkey) ```
F5Networks/f5-common-python
diff --git a/f5/bigip/tm/sys/test/unit/test_license.py b/f5/bigip/tm/sys/test/unit/test_license.py index 617edf2..27222bc 100644 --- a/f5/bigip/tm/sys/test/unit/test_license.py +++ b/f5/bigip/tm/sys/test/unit/test_license.py @@ -37,3 +37,17 @@ def test_delete_raises(FakeLicense): with pytest.raises(UnsupportedMethod) as EIO: FakeLicense.delete() assert str(EIO.value) == "License does not support the delete method" + + +def test_exec_install(FakeLicense): + assert "install" in FakeLicense._meta_data['allowed_commands'] + FakeLicense._meta_data['bigip']._meta_data.__getitem__.return_value = "14.0.0" + FakeLicense._exec_cmd = mock.MagicMock() + version_dict = {"13.1.0": 13, "14.0.0": 14} + + def get_version(version): + return version_dict[version] + + License.LooseVersion = mock.MagicMock(side_effect=get_version) + FakeLicense.exec_cmd("install", registrationKey='1234-56789-0') + FakeLicense._exec_cmd.assert_called_with("install", registrationKey='1234-56789-0')
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
3.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio", "pytest-bdd", "pytest-benchmark", "pytest-randomly", "responses", "mock", "hypothesis", "freezegun", "trustme", "requests-mock", "requests", "tomlkit" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==25.3.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 coverage==7.8.0 cryptography==44.0.2 exceptiongroup==1.2.2 execnet==2.1.1 f5-icontrol-rest==1.3.13 -e git+https://github.com/F5Networks/f5-common-python.git@fb001e95cc16ddf84ba03c3193582c69cd8ce0df#egg=f5_sdk freezegun==1.5.1 gherkin-official==29.0.0 hypothesis==6.130.6 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 Mako==1.3.9 MarkupSafe==3.0.2 mock==5.2.0 packaging==24.2 parse==1.20.2 parse_type==0.6.4 pluggy==1.5.0 py-cpuinfo==9.0.0 pycparser==2.22 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-bdd==8.1.0 pytest-benchmark==5.1.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.16.0 pytest-xdist==3.6.1 python-dateutil==2.9.0.post0 PyYAML==6.0.2 requests==2.32.3 requests-mock==1.12.1 responses==0.25.7 six==1.17.0 sortedcontainers==2.4.0 tomli==2.2.1 tomlkit==0.13.2 trustme==1.2.1 typing_extensions==4.13.0 urllib3==2.3.0 zipp==3.21.0
name: f5-common-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==25.3.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - coverage==7.8.0 - cryptography==44.0.2 - exceptiongroup==1.2.2 - execnet==2.1.1 - f5-icontrol-rest==1.3.13 - freezegun==1.5.1 - gherkin-official==29.0.0 - hypothesis==6.130.6 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - mako==1.3.9 - markupsafe==3.0.2 - mock==5.2.0 - packaging==24.2 - parse==1.20.2 - parse-type==0.6.4 - pluggy==1.5.0 - py-cpuinfo==9.0.0 - pycparser==2.22 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-bdd==8.1.0 - pytest-benchmark==5.1.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-randomly==3.16.0 - pytest-xdist==3.6.1 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - requests==2.32.3 - requests-mock==1.12.1 - responses==0.25.7 - six==1.17.0 - sortedcontainers==2.4.0 - tomli==2.2.1 - tomlkit==0.13.2 - trustme==1.2.1 - typing-extensions==4.13.0 - urllib3==2.3.0 - zipp==3.21.0 prefix: /opt/conda/envs/f5-common-python
[ "f5/bigip/tm/sys/test/unit/test_license.py::test_exec_install" ]
[]
[ "f5/bigip/tm/sys/test/unit/test_license.py::test_delete_raises", "f5/bigip/tm/sys/test/unit/test_license.py::test_create_raises" ]
[]
Apache License 2.0
2,823
[ "f5/bigip/tm/sys/license.py" ]
[ "f5/bigip/tm/sys/license.py" ]
Azure__WALinuxAgent-1273
066d711c4dd3f5a166a19da1910ee92b35cd3cbb
2018-07-25 01:14:02
6e9b985c1d7d564253a1c344bab01b45093103cd
boumenot: Sync'ed offline. LGTM.
diff --git a/azurelinuxagent/common/utils/processutil.py b/azurelinuxagent/common/utils/processutil.py index fe9dd4a5..9c0eb24b 100644 --- a/azurelinuxagent/common/utils/processutil.py +++ b/azurelinuxagent/common/utils/processutil.py @@ -16,7 +16,7 @@ # # Requires Python 2.6+ and Openssl 1.0+ # - +import multiprocessing import subprocess import sys import os @@ -100,75 +100,57 @@ def _destroy_process(process, signal_to_send=signal.SIGKILL): pass # If the process is already gone, that's fine -def capture_from_process_modern(process, cmd, timeout): - try: - stdout, stderr = process.communicate(timeout=timeout) - except subprocess.TimeoutExpired: - # Just kill the process. The .communicate method will gather stdout/stderr, close those pipes, and reap - # the zombie process. That is, .communicate() does all the other stuff that _destroy_process does. +def capture_from_process_poll(process, cmd, timeout): + """ + If the process forks, we cannot capture anything we block until the process tree completes + """ + retry = timeout + while retry > 0 and process.poll() is None: + time.sleep(1) + retry -= 1 + + # process did not fork, timeout expired + if retry == 0: os.killpg(os.getpgid(process.pid), signal.SIGKILL) stdout, stderr = process.communicate() msg = format_stdout_stderr(sanitize(stdout), sanitize(stderr)) raise ExtensionError("Timeout({0}): {1}\n{2}".format(timeout, cmd, msg)) - except OSError as e: - _destroy_process(process, signal.SIGKILL) - raise ExtensionError("Error while running '{0}': {1}".format(cmd, e.strerror)) - except ValueError: - _destroy_process(process, signal.SIGKILL) - raise ExtensionError("Invalid timeout ({0}) specified for '{1}'".format(timeout, cmd)) - except Exception as e: - _destroy_process(process, signal.SIGKILL) - raise ExtensionError("Exception while running '{0}': {1}".format(cmd, e)) - return stdout, stderr + # process completed or forked + return_code = process.wait() + if return_code != 0: + raise ExtensionError("Non-zero exit 
code: {0}, {1}".format(return_code, cmd)) + stderr = b'' + stdout = b'cannot collect stdout' -def capture_from_process_pre_33(process, cmd, timeout): - """ - Can't use process.communicate(timeout=), so do it the hard way. - """ - watcher_process_exited = 0 - watcher_process_timed_out = 1 - - def kill_on_timeout(pid, watcher_timeout): - """ - Check for the continued existence of pid once per second. If pid no longer exists, exit with code 0. - If timeout (in seconds) elapses, kill pid and exit with code 1. - """ - for iteration in range(watcher_timeout): - time.sleep(1) - try: - os.kill(pid, 0) - except OSError as ex: - if ESRCH == ex.errno: # Process no longer exists - exit(watcher_process_exited) - os.killpg(os.getpgid(pid), signal.SIGKILL) - exit(watcher_process_timed_out) - - watcher = Process(target=kill_on_timeout, args=(process.pid, timeout)) - watcher.start() + # attempt non-blocking process communication to capture output + def proc_comm(_process, _return): + try: + _stdout, _stderr = _process.communicate() + _return[0] = _stdout + _return[1] = _stderr + except Exception: + pass try: - # Now, block "forever" waiting on process. 
If the timeout-limited Event wait in the watcher pops, - # it will kill the process and Popen.communicate() will return - stdout, stderr = process.communicate() - except OSError as e: - _destroy_process(process, signal.SIGKILL) - raise ExtensionError("Error while running '{0}': {1}".format(cmd, e.strerror)) - except Exception as e: - _destroy_process(process, signal.SIGKILL) - raise ExtensionError("Exception while running '{0}': {1}".format(cmd, e)) + mgr = multiprocessing.Manager() + ret_dict = mgr.dict() - timeout_happened = False - watcher.join(1) - if watcher.is_alive(): - watcher.terminate() - else: - timeout_happened = (watcher.exitcode == watcher_process_timed_out) + cproc = Process(target=proc_comm, args=(process, ret_dict)) + cproc.start() - if timeout_happened: - msg = format_stdout_stderr(sanitize(stdout), sanitize(stderr)) - raise ExtensionError("Timeout({0}): {1}\n{2}".format(timeout, cmd, msg)) + # allow 1s to capture output + cproc.join(1) + + if cproc.is_alive(): + cproc.terminate() + + stdout = ret_dict[0] + stderr = ret_dict[1] + + except Exception: + pass return stdout, stderr @@ -204,10 +186,7 @@ def capture_from_process_raw(process, cmd, timeout): _destroy_process(process, signal.SIGKILL) raise ExtensionError("Subprocess was not root of its own process group") - if sys.version_info < (3, 3): - stdout, stderr = capture_from_process_pre_33(process, cmd, timeout) - else: - stdout, stderr = capture_from_process_modern(process, cmd, timeout) + stdout, stderr = capture_from_process_poll(process, cmd, timeout) return stdout, stderr diff --git a/azurelinuxagent/common/version.py b/azurelinuxagent/common/version.py index 650eaf0f..4d10b024 100644 --- a/azurelinuxagent/common/version.py +++ b/azurelinuxagent/common/version.py @@ -113,7 +113,7 @@ def get_distro(): AGENT_NAME = "WALinuxAgent" AGENT_LONG_NAME = "Azure Linux Agent" -AGENT_VERSION = '2.2.29' +AGENT_VERSION = '2.2.30' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) 
AGENT_DESCRIPTION = """ The Azure Linux Agent supports the provisioning and running of Linux diff --git a/azurelinuxagent/ga/exthandlers.py b/azurelinuxagent/ga/exthandlers.py index 21cc2ef2..f46447ab 100644 --- a/azurelinuxagent/ga/exthandlers.py +++ b/azurelinuxagent/ga/exthandlers.py @@ -1002,12 +1002,12 @@ class ExtHandlerInstance(object): CGroups.add_to_extension_cgroup(self.ext_handler.name) process = subprocess.Popen(full_path, - shell=True, - cwd=base_dir, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=os.environ, - preexec_fn=pre_exec_function) + shell=True, + cwd=base_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=os.environ, + preexec_fn=pre_exec_function) except OSError as e: raise ExtensionError("Failed to launch '{0}': {1}".format(full_path, e.strerror)) @@ -1016,7 +1016,9 @@ class ExtHandlerInstance(object): msg = capture_from_process(process, cmd, timeout) ret = process.poll() - if ret is None or ret != 0: + if ret is None: + raise ExtensionError("Process {0} was not terminated: {1}\n{2}".format(process.pid, cmd, msg)) + if ret != 0: raise ExtensionError("Non-zero exit code: {0}, {1}\n{2}".format(ret, cmd, msg)) duration = elapsed_milliseconds(begin_utc)
Agent Prematurely Terminates Long Running Extensions A user opened an issue against [Azure/custom-script-extension-linux #139](https://github.com/Azure/custom-script-extension-linux/issues/139) about the extension prematurely timing out. I have verified this issue against 2.2.29. ```sh az vm extension set -g my-resource-group --vm-name vm1 --publisher Microsoft.Azure.Extensions --name CustomScript --version 2.0 --settings "{ \"commandToExecute\": \"sleep 360; ls\" }" ``` The agent will kill the process after five minutes, which is different from previous behavior. The logs for the agent claim that it moved the extension PID into a cGroup, but the PID it referenced in the logs is the PID *agent ext-handler process* not the extension.
Azure/WALinuxAgent
diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py index a3aa7ae9..af5dcbba 100644 --- a/tests/ga/test_update.py +++ b/tests/ga/test_update.py @@ -1000,8 +1000,8 @@ class TestUpdate(UpdateTestCase): self.assertTrue(2 < len(self.update_handler.agents)) # Purge every other agent - kept_agents = self.update_handler.agents[1::2] - purged_agents = self.update_handler.agents[::2] + kept_agents = self.update_handler.agents[::2] + purged_agents = self.update_handler.agents[1::2] # Reload and assert only the kept agents remain on disk self.update_handler.agents = kept_agents diff --git a/tests/utils/test_process_util.py b/tests/utils/test_process_util.py index 85abdb5a..a950e556 100644 --- a/tests/utils/test_process_util.py +++ b/tests/utils/test_process_util.py @@ -14,12 +14,12 @@ # # Requires Python 2.6+ and Openssl 1.0+ # - +import datetime import subprocess from azurelinuxagent.common.exception import ExtensionError from azurelinuxagent.common.utils.processutil \ - import format_stdout_stderr, capture_from_process, capture_from_process_raw + import format_stdout_stderr, capture_from_process from tests.tools import * import sys @@ -121,7 +121,14 @@ class TestProcessUtils(AgentTestCase): actual = capture_from_process(process, cmd) self.assertEqual(expected, actual) - def test_process_timeout(self): + def test_process_timeout_non_forked(self): + """ + non-forked process runs for 20 seconds, timeout is 10 seconds + we expect: + - test to run in just over 10 seconds + - exception should be thrown + - output should be collected + """ cmd = "{0} -t 20".format(process_target) process = subprocess.Popen(cmd, shell=True, @@ -130,19 +137,93 @@ class TestProcessUtils(AgentTestCase): env=os.environ, preexec_fn=os.setsid) - if sys.version_info < (2, 7): - self.assertRaises(ExtensionError, capture_from_process_raw, process, cmd, 10) - else: - with self.assertRaises(ExtensionError) as ee: - capture_from_process_raw(process, cmd, 10) + try: + capture_from_process(process, 
'sleep 20', 10) + self.fail('Timeout exception was expected') + except ExtensionError as e: + body = str(e) + self.assertTrue('Timeout(10)' in body) + self.assertTrue('Iteration 9' in body) + self.assertFalse('Iteration 11' in body) + except Exception as gen_ex: + self.fail('Unexpected exception: {0}'.format(gen_ex)) - body = str(ee.exception) - if sys.version_info >= (3, 2): - self.assertNotRegex(body, "Iteration 12") - self.assertRegex(body, "Iteration 8") - else: - self.assertNotRegexpMatches(body, "Iteration 12") - self.assertRegexpMatches(body, "Iteration 8") + def test_process_timeout_forked(self): + """ + forked process runs for 20 seconds, timeout is 10 seconds + we expect: + - test to run in less than 3 seconds + - no exception should be thrown + - no output is collected + """ + cmd = "{0} -t 20 &".format(process_target) + process = subprocess.Popen(cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=os.environ, + preexec_fn=os.setsid) + + start = datetime.datetime.utcnow() + try: + cap = capture_from_process(process, 'sleep 20 &', 10) + except Exception as e: + self.fail('No exception should be thrown for a long running process which forks: {0}'.format(e)) + duration = datetime.datetime.utcnow() - start + + self.assertTrue(duration < datetime.timedelta(seconds=3)) + self.assertEqual('[stdout]\ncannot collect stdout\n\n[stderr]\n', cap) + + def test_process_behaved_non_forked(self): + """ + non-forked process runs for 10 seconds, timeout is 20 seconds + we expect: + - test to run in just over 10 seconds + - no exception should be thrown + - output should be collected + """ + cmd = "{0} -t 10".format(process_target) + process = subprocess.Popen(cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=os.environ, + preexec_fn=os.setsid) + + try: + body = capture_from_process(process, 'sleep 10', 20) + except Exception as gen_ex: + self.fail('Unexpected exception: {0}'.format(gen_ex)) + + self.assertFalse('Timeout' 
in body) + self.assertTrue('Iteration 9' in body) + self.assertTrue('Iteration 10' in body) + + def test_process_behaved_forked(self): + """ + forked process runs for 10 seconds, timeout is 20 seconds + we expect: + - test to run in under 3 seconds + - no exception should be thrown + - output is not collected + """ + cmd = "{0} -t 10 &".format(process_target) + process = subprocess.Popen(cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=os.environ, + preexec_fn=os.setsid) + + start = datetime.datetime.utcnow() + try: + body = capture_from_process(process, 'sleep 10 &', 20) + except Exception as e: + self.fail('No exception should be thrown for a well behaved process which forks: {0}'.format(e)) + duration = datetime.datetime.utcnow() - start + + self.assertTrue(duration < datetime.timedelta(seconds=3)) + self.assertEqual('[stdout]\ncannot collect stdout\n\n[stderr]\n', body) def test_process_bad_pgid(self): """
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 3 }
2.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.4", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work nose==1.3.7 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work -e git+https://github.com/Azure/WALinuxAgent.git@066d711c4dd3f5a166a19da1910ee92b35cd3cbb#egg=WALinuxAgent zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: WALinuxAgent channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - nose==1.3.7 prefix: /opt/conda/envs/WALinuxAgent
[ "tests/ga/test_update.py::TestUpdate::test_purge_agents", "tests/utils/test_process_util.py::TestProcessUtils::test_process_behaved_forked", "tests/utils/test_process_util.py::TestProcessUtils::test_process_timeout_forked" ]
[]
[ "tests/ga/test_update.py::TestGuestAgentError::test_clear", "tests/ga/test_update.py::TestGuestAgentError::test_creation", "tests/ga/test_update.py::TestGuestAgentError::test_mark_failure", "tests/ga/test_update.py::TestGuestAgentError::test_mark_failure_permanent", "tests/ga/test_update.py::TestGuestAgentError::test_save", "tests/ga/test_update.py::TestGuestAgentError::test_str", "tests/ga/test_update.py::TestGuestAgent::test_clear_error", "tests/ga/test_update.py::TestGuestAgent::test_creation", "tests/ga/test_update.py::TestGuestAgent::test_download", "tests/ga/test_update.py::TestGuestAgent::test_download_fail", "tests/ga/test_update.py::TestGuestAgent::test_download_fallback", "tests/ga/test_update.py::TestGuestAgent::test_ensure_download_skips_blacklisted", "tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded", "tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_download_fails", "tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_load_manifest_fails", "tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_unpack_fails", "tests/ga/test_update.py::TestGuestAgent::test_ioerror_not_blacklisted", "tests/ga/test_update.py::TestGuestAgent::test_is_available", "tests/ga/test_update.py::TestGuestAgent::test_is_blacklisted", "tests/ga/test_update.py::TestGuestAgent::test_is_downloaded", "tests/ga/test_update.py::TestGuestAgent::test_load_error", "tests/ga/test_update.py::TestGuestAgent::test_load_manifest", "tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_empty", "tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_malformed", "tests/ga/test_update.py::TestGuestAgent::test_load_manifest_missing", "tests/ga/test_update.py::TestGuestAgent::test_mark_failure", "tests/ga/test_update.py::TestGuestAgent::test_resource_gone_error_not_blacklisted", "tests/ga/test_update.py::TestGuestAgent::test_unpack", "tests/ga/test_update.py::TestGuestAgent::test_unpack_fail", 
"tests/ga/test_update.py::TestUpdate::test_creation", "tests/ga/test_update.py::TestUpdate::test_emit_restart_event_emits_event_if_not_clean_start", "tests/ga/test_update.py::TestUpdate::test_ensure_no_orphans", "tests/ga/test_update.py::TestUpdate::test_ensure_no_orphans_ignores_exceptions", "tests/ga/test_update.py::TestUpdate::test_ensure_no_orphans_kills_after_interval", "tests/ga/test_update.py::TestUpdate::test_ensure_no_orphans_skips_if_no_orphans", "tests/ga/test_update.py::TestUpdate::test_ensure_partition_assigned", "tests/ga/test_update.py::TestUpdate::test_ensure_readonly_leaves_unmodified", "tests/ga/test_update.py::TestUpdate::test_ensure_readonly_sets_readonly", "tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_ignores_installed_agent", "tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_raises_exception_for_restarting_agent", "tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_resets_with_new_agent", "tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_will_not_raise_exception_for_long_restarts", "tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_will_not_raise_exception_too_few_restarts", "tests/ga/test_update.py::TestUpdate::test_filter_blacklisted_agents", "tests/ga/test_update.py::TestUpdate::test_find_agents", "tests/ga/test_update.py::TestUpdate::test_find_agents_does_reload", "tests/ga/test_update.py::TestUpdate::test_find_agents_sorts", "tests/ga/test_update.py::TestUpdate::test_get_host_plugin_returns_host_for_wireserver", "tests/ga/test_update.py::TestUpdate::test_get_host_plugin_returns_none_otherwise", "tests/ga/test_update.py::TestUpdate::test_get_latest_agent", "tests/ga/test_update.py::TestUpdate::test_get_latest_agent_excluded", "tests/ga/test_update.py::TestUpdate::test_get_latest_agent_no_updates", "tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skip_updates", "tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skips_unavailable", 
"tests/ga/test_update.py::TestUpdate::test_get_pid_files", "tests/ga/test_update.py::TestUpdate::test_get_pid_files_returns_previous", "tests/ga/test_update.py::TestUpdate::test_is_clean_start_returns_false_for_exceptions", "tests/ga/test_update.py::TestUpdate::test_is_clean_start_returns_false_when_sentinel_exists", "tests/ga/test_update.py::TestUpdate::test_is_clean_start_returns_true_when_no_sentinel", "tests/ga/test_update.py::TestUpdate::test_is_orphaned_returns_false_if_parent_exists", "tests/ga/test_update.py::TestUpdate::test_is_orphaned_returns_true_if_parent_does_not_exist", "tests/ga/test_update.py::TestUpdate::test_is_orphaned_returns_true_if_parent_is_init", "tests/ga/test_update.py::TestUpdate::test_is_version_available", "tests/ga/test_update.py::TestUpdate::test_is_version_available_accepts_current", "tests/ga/test_update.py::TestUpdate::test_is_version_available_rejects", "tests/ga/test_update.py::TestUpdate::test_is_version_available_rejects_by_default", "tests/ga/test_update.py::TestUpdate::test_package_filter_for_agent_manifest", "tests/ga/test_update.py::TestUpdate::test_run", "tests/ga/test_update.py::TestUpdate::test_run_clears_sentinel_on_successful_exit", "tests/ga/test_update.py::TestUpdate::test_run_emits_restart_event", "tests/ga/test_update.py::TestUpdate::test_run_keeps_running", "tests/ga/test_update.py::TestUpdate::test_run_latest", "tests/ga/test_update.py::TestUpdate::test_run_latest_captures_signals", "tests/ga/test_update.py::TestUpdate::test_run_latest_creates_only_one_signal_handler", "tests/ga/test_update.py::TestUpdate::test_run_latest_defaults_to_current", "tests/ga/test_update.py::TestUpdate::test_run_latest_exception_blacklists", "tests/ga/test_update.py::TestUpdate::test_run_latest_exception_does_not_blacklist_if_terminating", "tests/ga/test_update.py::TestUpdate::test_run_latest_forwards_output", "tests/ga/test_update.py::TestUpdate::test_run_latest_nonzero_code_marks_failures", 
"tests/ga/test_update.py::TestUpdate::test_run_latest_passes_child_args", "tests/ga/test_update.py::TestUpdate::test_run_latest_polling_stops_at_failure", "tests/ga/test_update.py::TestUpdate::test_run_latest_polling_stops_at_success", "tests/ga/test_update.py::TestUpdate::test_run_latest_polls_and_waits_for_success", "tests/ga/test_update.py::TestUpdate::test_run_latest_polls_frequently_if_installed_is_latest", "tests/ga/test_update.py::TestUpdate::test_run_latest_polls_moderately_if_installed_not_latest", "tests/ga/test_update.py::TestUpdate::test_run_leaves_sentinel_on_unsuccessful_exit", "tests/ga/test_update.py::TestUpdate::test_run_stops_if_orphaned", "tests/ga/test_update.py::TestUpdate::test_run_stops_if_update_available", "tests/ga/test_update.py::TestUpdate::test_set_agents_sets_agents", "tests/ga/test_update.py::TestUpdate::test_set_agents_sorts_agents", "tests/ga/test_update.py::TestUpdate::test_set_sentinel", "tests/ga/test_update.py::TestUpdate::test_set_sentinel_writes_current_agent", "tests/ga/test_update.py::TestUpdate::test_shutdown", "tests/ga/test_update.py::TestUpdate::test_shutdown_ignores_exceptions", "tests/ga/test_update.py::TestUpdate::test_shutdown_ignores_missing_sentinel_file", "tests/ga/test_update.py::TestUpdate::test_update_available_returns_true_if_current_gets_blacklisted", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_handles_missing_family", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_includes_old_agents", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_purges_old_agents", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_returns_true_on_first_use", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_skips_if_too_frequent", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_skips_if_when_no_new_versions", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_skips_when_no_versions", 
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_skips_when_updates_are_disabled", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_sorts", "tests/ga/test_update.py::TestUpdate::test_upgrade_available_will_refresh_goal_state", "tests/ga/test_update.py::TestUpdate::test_write_pid_file", "tests/ga/test_update.py::TestUpdate::test_write_pid_file_ignores_exceptions", "tests/ga/test_update.py::MonitorThreadTest::test_check_if_env_thread_is_alive", "tests/ga/test_update.py::MonitorThreadTest::test_check_if_monitor_thread_is_alive", "tests/ga/test_update.py::MonitorThreadTest::test_restart_env_thread", "tests/ga/test_update.py::MonitorThreadTest::test_restart_env_thread_if_not_alive", "tests/ga/test_update.py::MonitorThreadTest::test_restart_monitor_thread", "tests/ga/test_update.py::MonitorThreadTest::test_restart_monitor_thread_if_not_alive", "tests/ga/test_update.py::MonitorThreadTest::test_start_threads", "tests/utils/test_process_util.py::TestProcessUtils::test_format_stdout_stderr00", "tests/utils/test_process_util.py::TestProcessUtils::test_format_stdout_stderr01", "tests/utils/test_process_util.py::TestProcessUtils::test_format_stdout_stderr02", "tests/utils/test_process_util.py::TestProcessUtils::test_format_stdout_stderr03", "tests/utils/test_process_util.py::TestProcessUtils::test_format_stdout_stderr04", "tests/utils/test_process_util.py::TestProcessUtils::test_format_stdout_stderr05", "tests/utils/test_process_util.py::TestProcessUtils::test_process_bad_pgid", "tests/utils/test_process_util.py::TestProcessUtils::test_process_behaved_non_forked", "tests/utils/test_process_util.py::TestProcessUtils::test_process_stdout_stderr", "tests/utils/test_process_util.py::TestProcessUtils::test_process_timeout_non_forked" ]
[]
Apache License 2.0
2,824
[ "azurelinuxagent/common/version.py", "azurelinuxagent/common/utils/processutil.py", "azurelinuxagent/ga/exthandlers.py" ]
[ "azurelinuxagent/common/version.py", "azurelinuxagent/common/utils/processutil.py", "azurelinuxagent/ga/exthandlers.py" ]
joblib__joblib-725
b80c96cb905ca6a958a5769d772c76be1d8f3acf
2018-07-25 12:27:37
cbb660126d2ad8ac9f9ae9ffc16dd551ca937ebd
codecov[bot]: # [Codecov](https://codecov.io/gh/joblib/joblib/pull/725?src=pr&el=h1) Report > Merging [#725](https://codecov.io/gh/joblib/joblib/pull/725?src=pr&el=desc) into [master](https://codecov.io/gh/joblib/joblib/commit/b80c96cb905ca6a958a5769d772c76be1d8f3acf?src=pr&el=desc) will **decrease** coverage by `0.13%`. > The diff coverage is `n/a`. [![Impacted file tree graph](https://codecov.io/gh/joblib/joblib/pull/725/graphs/tree.svg?height=150&src=pr&width=650&token=gA6LF5DGTW)](https://codecov.io/gh/joblib/joblib/pull/725?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #725 +/- ## ========================================== - Coverage 95.23% 95.09% -0.14% ========================================== Files 40 40 Lines 5769 5769 ========================================== - Hits 5494 5486 -8 - Misses 275 283 +8 ``` | [Impacted Files](https://codecov.io/gh/joblib/joblib/pull/725?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [joblib/disk.py](https://codecov.io/gh/joblib/joblib/pull/725/diff?src=pr&el=tree#diff-am9ibGliL2Rpc2sucHk=) | `81.66% <0%> (-6.67%)` | :arrow_down: | | [joblib/\_parallel\_backends.py](https://codecov.io/gh/joblib/joblib/pull/725/diff?src=pr&el=tree#diff-am9ibGliL19wYXJhbGxlbF9iYWNrZW5kcy5weQ==) | `96% <0%> (-0.8%)` | :arrow_down: | | [joblib/test/test\_memory.py](https://codecov.io/gh/joblib/joblib/pull/725/diff?src=pr&el=tree#diff-am9ibGliL3Rlc3QvdGVzdF9tZW1vcnkucHk=) | `97.49% <0%> (-0.36%)` | :arrow_down: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/joblib/joblib/pull/725?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/joblib/joblib/pull/725?src=pr&el=footer). Last update [b80c96c...43c5f0d](https://codecov.io/gh/joblib/joblib/pull/725?src=pr&el=lastupdated). 
Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/.travis.yml b/.travis.yml index 2c999f8..594615f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,13 +11,13 @@ env: - PYTHON_VERSION="2.7" NUMPY_VERSION="1.8" COVERAGE="true" - PYTHON_VERSION="3.4" NUMPY_VERSION="1.10" # NUMPY_VERSION not set means numpy is not installed - - PYTHON_VERSION="3.4" COVERAGE="true" + - PYTHON_VERSION="3.6" COVERAGE="true" DISTRIBUTED_VERSION="1.22" - PYTHON_VERSION="3.5" NUMPY_VERSION="1.12" - PYTHON_VERSION="3.6" NUMPY_VERSION="1.14" COVERAGE="true" CYTHON="true" # multiprocesssing disabled via the JOBLIB_MULTIPROCESSING environment variable - PYTHON_VERSION="3.6" NUMPY_VERSION="1.14" JOBLIB_MULTIPROCESSING=0 COVERAGE="true" # Make sure we do not depend on lz4 to run joblib. - - PYTHON_VERSION="3.6" NUMPY_VERSION="1.12" COVERAGE="true" NO_LZ4="true" CYTHON="true" + - PYTHON_VERSION="3.6" NUMPY_VERSION="1.14" COVERAGE="true" NO_LZ4="true" CYTHON="true" DISTRIBUTED_VERSION="1.22" # flake8 linting on diff wrt common ancestor with upstream/master - SKIP_TESTS="true" FLAKE8_VERSION="3.5" PYTHON_VERSION="3.6" diff --git a/continuous_integration/appveyor/requirements.txt b/continuous_integration/appveyor/requirements.txt index 14384f7..b581b00 100644 --- a/continuous_integration/appveyor/requirements.txt +++ b/continuous_integration/appveyor/requirements.txt @@ -4,4 +4,5 @@ pytest pytest-cov pytest-timeout codecov +distributed lz4 diff --git a/continuous_integration/travis/install.sh b/continuous_integration/travis/install.sh index 98ec96b..ae24c71 100755 --- a/continuous_integration/travis/install.sh +++ b/continuous_integration/travis/install.sh @@ -20,7 +20,7 @@ print_conda_requirements() { # - for scikit-learn, SCIKIT_LEARN_VERSION is used TO_INSTALL_ALWAYS="pip pytest" REQUIREMENTS="$TO_INSTALL_ALWAYS" - TO_INSTALL_MAYBE="python numpy flake8" + TO_INSTALL_MAYBE="python numpy distributed flake8" for PACKAGE in $TO_INSTALL_MAYBE; do # Capitalize package name and add _VERSION PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION" 
diff --git a/examples/compressors_comparison.py b/examples/compressors_comparison.py index 5f0f288..7a09a73 100644 --- a/examples/compressors_comparison.py +++ b/examples/compressors_comparison.py @@ -34,6 +34,8 @@ names = ("duration, protocol_type, service, flag, src_bytes, " "num_file_creations, ").split(', ') data = pd.read_csv(url, names=names) +# Take the first 1e6 data points, for a faster example +data = data.iloc[:int(1e6)] ############################################################################### # Dump and load the dataset without compression @@ -190,13 +192,13 @@ file_sizes = (raw_file_size, lz4_file_size, zlib_file_size, lzma_file_size) ind = np.arange(N) width = 0.5 -plt.figure(1) +plt.figure(1, figsize=(5, 4)) p1 = plt.bar(ind, dump_durations, width) p2 = plt.bar(ind, load_durations, width, bottom=dump_durations) plt.ylabel('Time in seconds') plt.title('Dump and load durations') plt.xticks(ind, ('Raw', 'LZ4', 'Zlib', 'LZMA')) -plt.yticks(np.arange(0, 30, 5)) +plt.yticks(np.arange(0, 5)) plt.legend((p1[0], p2[0]), ('Dump duration', 'Load duration')) ############################################################################### @@ -211,7 +213,7 @@ plt.legend((p1[0], p2[0]), ('Dump duration', 'Load duration')) # LZMA and Zlib, even if always slower for dumping data, are quite fast when # re-loading compressed data from disk. 
-plt.figure(2) +plt.figure(2, figsize=(5, 4)) plt.bar(ind, file_sizes, width, log=True) plt.ylabel('File size in MB') plt.xticks(ind, ('Raw', 'LZ4', 'Zlib', 'LZMA')) diff --git a/joblib/_dask.py b/joblib/_dask.py new file mode 100644 index 0000000..92b9627 --- /dev/null +++ b/joblib/_dask.py @@ -0,0 +1,259 @@ +from __future__ import print_function, division, absolute_import + +import contextlib + +from uuid import uuid4 +import weakref + +from .parallel import AutoBatchingMixin, ParallelBackendBase, BatchedCalls +from .parallel import parallel_backend + +try: + import distributed +except ImportError: + distributed = None + +if distributed is not None: + from distributed.client import Client, _wait + from distributed.utils import funcname, itemgetter + from distributed import get_client, secede, rejoin + from distributed.worker import thread_state + from distributed.sizeof import sizeof + from tornado import gen + + +def is_weakrefable(obj): + try: + weakref.ref(obj) + return True + except TypeError: + return False + + +class _WeakKeyDictionary: + """A variant of weakref.WeakKeyDictionary for unhashable objects. + + This datastructure is used to store futures for broadcasted data objects + such as large numpy arrays or pandas dataframes that are not hashable and + therefore cannot be used as keys of traditional python dicts. + + Futhermore using a dict with id(array) as key is not safe because the + Python is likely to reuse id of recently collected arrays. + """ + + def __init__(self): + self._data = {} + + def __getitem__(self, obj): + ref, val = self._data[id(obj)] + if ref() is not obj: + # In case of a race condition with on_destroy. + raise KeyError(obj) + return val + + def __setitem__(self, obj, value): + key = id(obj) + try: + ref, _ = self._data[key] + if ref() is not obj: + # In case of race condition with on_destroy. 
+ raise KeyError(obj) + except KeyError: + # Insert the new entry in the mapping along with a weakref + # callback to automatically delete the entry from the mapping + # as soon as the object used as key is garbage collected. + def on_destroy(_): + del self._data[key] + ref = weakref.ref(obj, on_destroy) + self._data[key] = ref, value + + def __len__(self): + return len(self._data) + + def clear(self): + self._data.clear() + + +def _funcname(x): + try: + if isinstance(x, BatchedCalls): + x = x.items[0][0] + except Exception: + pass + return funcname(x) + + +class Batch(object): + def __init__(self, tasks): + self.tasks = tasks + + def __call__(self, *data): + results = [] + with parallel_backend('dask'): + for func, args, kwargs in self.tasks: + args = [a(data) if isinstance(a, itemgetter) else a + for a in args] + kwargs = {k: v(data) if isinstance(v, itemgetter) else v + for (k, v) in kwargs.items()} + results.append(func(*args, **kwargs)) + return results + + def __reduce__(self): + return Batch, (self.tasks,) + + +class DaskDistributedBackend(ParallelBackendBase, AutoBatchingMixin): + MIN_IDEAL_BATCH_DURATION = 0.2 + MAX_IDEAL_BATCH_DURATION = 1.0 + + def __init__(self, scheduler_host=None, scatter=None, + client=None, loop=None, **submit_kwargs): + if client is None: + if scheduler_host: + client = Client(scheduler_host, loop=loop, + set_as_default=False) + else: + try: + client = get_client() + except ValueError: + msg = ("To use Joblib with Dask first create a Dask Client" + "\n\n" + " from dask.distributed import Client\n" + " client = Client()\n" + "or\n" + " client = Client('scheduler-address:8786')") + raise ValueError(msg) + + self.client = client + + if scatter is not None and not isinstance(scatter, (list, tuple)): + raise TypeError("scatter must be a list/tuple, got " + "`%s`" % type(scatter).__name__) + + if scatter is not None and len(scatter) > 0: + # Keep a reference to the scattered data to keep the ids the same + self._scatter = list(scatter) + 
scattered = self.client.scatter(scatter, broadcast=True) + self.data_futures = {id(x): f for x, f in zip(scatter, scattered)} + else: + self._scatter = [] + self.data_futures = {} + self.task_futures = set() + self.submit_kwargs = submit_kwargs + + def __reduce__(self): + return (DaskDistributedBackend, ()) + + def get_nested_backend(self): + return DaskDistributedBackend(client=self.client) + + def configure(self, n_jobs=1, parallel=None, **backend_args): + return self.effective_n_jobs(n_jobs) + + def start_call(self): + self.call_data_futures = _WeakKeyDictionary() + + def stop_call(self): + # The explicit call to clear is required to break a cycling reference + # to the futures. + self.call_data_futures.clear() + + def effective_n_jobs(self, n_jobs): + return sum(self.client.ncores().values()) + + def _to_func_args(self, func): + collected_futures = [] + itemgetters = dict() + + # Futures that are dynamically generated during a single call to + # Parallel.__call__. + call_data_futures = getattr(self, 'call_data_futures', None) + + def maybe_to_futures(args): + for arg in args: + arg_id = id(arg) + if arg_id in itemgetters: + yield itemgetters[arg_id] + continue + + f = self.data_futures.get(arg_id, None) + if f is None and call_data_futures is not None: + try: + f = call_data_futures[arg] + except KeyError: + if is_weakrefable(arg) and sizeof(arg) > 1e3: + # Automatically scatter large objects to some of + # the workers to avoid duplicated data transfers. + # Rely on automated inter-worker data stealing if + # more workers need to reuse this data + # concurrently. 
+ [f] = self.client.scatter([arg]) + call_data_futures[arg] = f + + if f is not None: + getter = itemgetter(len(collected_futures)) + collected_futures.append(f) + itemgetters[arg_id] = getter + arg = getter + yield arg + + tasks = [] + for f, args, kwargs in func.items: + args = list(maybe_to_futures(args)) + kwargs = dict(zip(kwargs.keys(), + maybe_to_futures(kwargs.values()))) + tasks.append((f, args, kwargs)) + + if not collected_futures: + return func, () + return (Batch(tasks), collected_futures) + + def apply_async(self, func, callback=None): + key = '%s-batch-%s' % (_funcname(func), uuid4().hex) + func, args = self._to_func_args(func) + + future = self.client.submit(func, *args, key=key, **self.submit_kwargs) + self.task_futures.add(future) + + @gen.coroutine + def callback_wrapper(): + result = yield _wait([future]) + self.task_futures.remove(future) + if callback is not None: + callback(result) # gets called in separate thread + + self.client.loop.add_callback(callback_wrapper) + + ref = weakref.ref(future) # avoid reference cycle + + def get(): + return ref().result() + + future.get = get # monkey patch to achieve AsyncResult API + return future + + def abort_everything(self, ensure_ready=True): + """ Tell the client to cancel any task submitted via this instance + + joblib.Parallel will never access those results + """ + self.client.cancel(self.task_futures) + self.task_futures.clear() + + @contextlib.contextmanager + def retrieval_context(self): + """Override ParallelBackendBase.retrieval_context to avoid deadlocks. + + This removes thread from the worker's thread pool (using 'secede'). + Seceding avoids deadlock in nested parallelism settings. + """ + # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how + # this is used. + if hasattr(thread_state, 'execution_state'): + # we are in a worker. Secede to avoid deadlock. 
+ secede() + + yield + + if hasattr(thread_state, 'execution_state'): + rejoin() diff --git a/joblib/parallel.py b/joblib/parallel.py index 299f79f..266b2be 100644 --- a/joblib/parallel.py +++ b/joblib/parallel.py @@ -59,7 +59,8 @@ VALID_BACKEND_CONSTRAINTS = ('sharedmem', None) def _register_dask(): """ Register Dask Backend if called with parallel_backend("dask") """ try: - import distributed.joblib # noqa: #F401 + from ._dask import DaskDistributedBackend + register_parallel_backend('dask', DaskDistributedBackend) except ImportError: msg = ("To use the dask.distributed backend you must install both " "the `dask` and distributed modules.\n\n" diff --git a/setup.cfg b/setup.cfg index 40a2be3..3551800 100644 --- a/setup.cfg +++ b/setup.cfg @@ -13,8 +13,8 @@ universal=1 addopts = --doctest-glob="doc/*.rst" --doctest-modules - -p no:warnings - --ignore joblib/externals + -p no:warnings + --ignore joblib/externals testpaths = joblib [flake8]
readthedocs still documents joblib 0.11 Hello, https://pythonhosted.org/joblib/index.html still documents joblib 0.11 This is disturbing, especially because there are some "big" changes in 0.12 (backend, etc.), and the 0.12 switch already happened (at least with anaconda). Thanks.
joblib/joblib
diff --git a/conftest.py b/conftest.py index 3bf7460..bec21db 100644 --- a/conftest.py +++ b/conftest.py @@ -5,6 +5,10 @@ from _pytest.doctest import DoctestItem import logging from joblib.parallel import mp +try: + import lz4 +except ImportError: + lz4 = None def pytest_collection_modifyitems(config, items): @@ -30,6 +34,11 @@ def pytest_collection_modifyitems(config, items): if isinstance(item, DoctestItem): item.add_marker(skip_marker) + if lz4 is None: + for item in items: + if item.name == 'persistence.rst': + item.add_marker(pytest.mark.skip(reason='lz4 is missing')) + def pytest_configure(config): """Setup multiprocessing logging for the tests""" diff --git a/joblib/test/test_dask.py b/joblib/test/test_dask.py new file mode 100644 index 0000000..13aa8ee --- /dev/null +++ b/joblib/test/test_dask.py @@ -0,0 +1,257 @@ +from __future__ import print_function, division, absolute_import +import os + +import pytest +from random import random +from time import sleep + +from .. import Parallel, delayed, parallel_backend +from ..parallel import ThreadingBackend +from .._dask import DaskDistributedBackend + +distributed = pytest.importorskip('distributed') +from distributed import Client +from distributed.metrics import time +from distributed.utils_test import cluster, inc +from distributed.utils_test import loop # noqa F401 + + +def noop(*args, **kwargs): + pass + + +def slow_raise_value_error(condition, duration=0.05): + sleep(duration) + if condition: + raise ValueError("condition evaluated to True") + + +def test_simple(loop): # noqa: F811 + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_backend('dask') as (ba, _): + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + with pytest.raises(ValueError): + Parallel()(delayed(slow_raise_value_error)(i == 3) + for i in range(10)) + + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for 
i in range(10)] + + +def random2(): + return random() + + +def test_dont_assume_function_purity(loop): # noqa: F811 + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_backend('dask') as (ba, _): + x, y = Parallel()(delayed(random2)() for i in range(2)) + assert x != y + + +def test_dask_funcname(loop): # noqa: F811 + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: + with parallel_backend('dask') as (ba, _): + x, y = Parallel()(delayed(inc)(i) for i in range(2)) + + def f(dask_scheduler): + return list(dask_scheduler.transition_log) + log = client.run_on_scheduler(f) + assert all(tup[0].startswith('inc-batch') for tup in log) + + +def add5(a, b, c, d=0, e=0): + return a + b + c + d + e + + +class CountSerialized(object): + def __init__(self, x): + self.x = x + self.count = 0 + + def __add__(self, other): + return self.x + getattr(other, 'x', other) + + __radd__ = __add__ + + def __reduce__(self): + self.count += 1 + return (CountSerialized, (self.x,)) + + +def test_manual_scatter(loop): # noqa: F811 + x = CountSerialized(1) + y = CountSerialized(2) + z = CountSerialized(3) + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_backend('dask', scatter=[x, y]) as (ba, _): + f = delayed(add5) + tasks = [f(x, y, z, d=4, e=5), + f(x, z, y, d=5, e=4), + f(y, x, z, d=x, e=5), + f(z, z, x, d=z, e=y)] + expected = [func(*args, **kwargs) + for func, args, kwargs in tasks] + results = Parallel()(tasks) + + # Scatter must take a list/tuple + with pytest.raises(TypeError): + with parallel_backend('dask', loop=loop, scatter=1): + pass + + assert results == expected + + # Scattered variables only serialized once + assert x.count == 1 + assert y.count == 1 + assert z.count == 4 + + +def test_auto_scatter(loop): # noqa: F811 + np = pytest.importorskip('numpy') + data = np.ones(int(1e7), dtype=np.uint8) + + def 
count_events(event_name, client): + worker_events = client.run(lambda dask_worker: dask_worker.log) + event_counts = {} + for w, events in worker_events.items(): + event_counts[w] = len([event for event in list(events) + if event[1] == event_name]) + return event_counts + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: + with parallel_backend('dask') as (ba, _): + # Passing the same data as arg and kwarg triggers a single + # scatter operation whose result is reused. + Parallel()(delayed(noop)(data, data, i, opt=data) + for i in range(5)) + # By default large array are automatically scattered with + # broadcast=1 which means that one worker must directly receive + # the data from the scatter operation once. + counts = count_events('receive-from-scatter', client) + assert counts[a['address']] + counts[b['address']] == 1 + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: + with parallel_backend('dask') as (ba, _): + Parallel()(delayed(noop)(data[:3], i) for i in range(5)) + # Small arrays are passed within the task definition without going + # through a scatter operation. 
+ counts = count_events('receive-from-scatter', client) + assert counts[a['address']] == 0 + assert counts[b['address']] == 0 + + +def test_nested_backend_context_manager(loop): # noqa: F811 + def get_nested_pids(): + pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + return pids + + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: + with parallel_backend('dask') as (ba, _): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids, check_pickle=False)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + # No deadlocks + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_backend('dask') as (ba, _): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids, check_pickle=False)() + for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + +def test_errors(loop): # noqa: F811 + with pytest.raises(ValueError) as info: + with parallel_backend('dask'): + pass + + assert "create a dask client" in str(info.value).lower() + + +def test_correct_nested_backend(loop): # noqa: F811 + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + # No requirement, should be us + with parallel_backend('dask') as (ba, _): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require=None) for _ in range(1)) + assert isinstance(result[0][0][0], DaskDistributedBackend) + + # Require threads, should be threading + with parallel_backend('dask') as (ba, _): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require='sharedmem') + for _ in range(1)) + assert isinstance(result[0][0][0], ThreadingBackend) + + +def outer(nested_require): + return Parallel(n_jobs=2, prefer='threads')( + delayed(middle)(nested_require) for _ in range(1) + ) + + +def middle(require): + return Parallel(n_jobs=2, require=require)( + 
delayed(inner)() for _ in range(1) + ) + + +def inner(): + return Parallel()._backend + + +def test_secede_with_no_processes(loop): # noqa: F811 + # https://github.com/dask/distributed/issues/1775 + with Client(loop=loop, processes=False, set_as_default=True): + with parallel_backend('dask'): + Parallel(n_jobs=4)(delayed(id)(i) for i in range(2)) + + +def _worker_address(_): + from distributed import get_worker + return get_worker().address + + +def test_dask_backend_keywords(loop): # noqa: F811 + with cluster() as (s, [a, b]): + with Client(s['address'], loop=loop) as client: # noqa: F841 + with parallel_backend('dask', workers=a['address']) as (ba, _): + seq = Parallel()( + delayed(_worker_address)(i) for i in range(10)) + assert seq == [a['address']] * 10 + + with parallel_backend('dask', workers=b['address']) as (ba, _): + seq = Parallel()( + delayed(_worker_address)(i) for i in range(10)) + assert seq == [b['address']] * 10 + + +def test_cleanup(loop): # noqa: F811 + with Client(processes=False, loop=loop) as client: + with parallel_backend('dask'): + Parallel()(delayed(inc)(i) for i in range(10)) + + start = time() + while client.cluster.scheduler.tasks: + sleep(0.01) + assert time() < start + 5 + + assert not client.futures diff --git a/joblib/test/test_parallel.py b/joblib/test/test_parallel.py index 789a1f4..ca5321a 100644 --- a/joblib/test/test_parallel.py +++ b/joblib/test/test_parallel.py @@ -16,6 +16,7 @@ from math import sqrt from time import sleep from pickle import PicklingError from multiprocessing import TimeoutError +import pytest import joblib from joblib import dump, load @@ -1093,6 +1094,8 @@ def test_lambda_expression(): def test_delayed_check_pickle_deprecated(): + if sys.version_info < (3, 4): + pytest.skip("Warning check unstable under Python 2, life is too short") class UnpicklableCallable(object): @@ -1288,21 +1291,21 @@ def test_external_backends(): assert isinstance(Parallel()._backend, ThreadingBackend) -def 
_recursive_backend_info(limit=3): +def _recursive_backend_info(limit=3, **kwargs): """Perform nested parallel calls and introspect the backend on the way""" with Parallel() as p: this_level = [(type(p._backend).__name__, p._backend.nesting_level)] if limit == 0: return this_level - results = p(delayed(_recursive_backend_info)(limit=limit - 1) + results = p(delayed(_recursive_backend_info)(limit=limit - 1, **kwargs) for i in range(1)) return this_level + results[0] @with_multiprocessing @parametrize('backend', ['loky', 'threading']) -def test_nested_parallel_limit(backend): +def test_nested_parallelism_limit(backend): with parallel_backend(backend, n_jobs=2): backend_types_and_levels = _recursive_backend_info() @@ -1316,6 +1319,28 @@ def test_nested_parallel_limit(backend): assert backend_types_and_levels == expected_types_and_levels +@with_numpy +def test_nested_parallelism_with_dask(): + distributed = pytest.importorskip('distributed') + client = distributed.Client() # noqa + + # 10 MB of data as argument to trigger implicit scattering + data = np.ones(int(1e7), dtype=np.uint8) + for i in range(2): + with parallel_backend('dask'): + backend_types_and_levels = _recursive_backend_info(data=data) + assert len(backend_types_and_levels) == 4 + assert all(name == 'DaskDistributedBackend' + for name, _ in backend_types_and_levels) + + # No argument + with parallel_backend('dask'): + backend_types_and_levels = _recursive_backend_info() + assert len(backend_types_and_levels) == 4 + assert all(name == 'DaskDistributedBackend' + for name, _ in backend_types_and_levels) + + def _recursive_parallel(nesting_limit=None): """A horrible function that does recursive parallel calls""" return Parallel()(delayed(_recursive_parallel)() for i in range(2))
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 6 }
0.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-timeout", "memory_profiler", "lz4" ], "pre_install": [], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/joblib/joblib.git@b80c96cb905ca6a958a5769d772c76be1d8f3acf#egg=joblib lz4==3.1.10 memory-profiler==0.61.0 packaging==21.3 pluggy==1.0.0 psutil==7.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-timeout==2.1.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: joblib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - lz4==3.1.10 - memory-profiler==0.61.0 - packaging==21.3 - pluggy==1.0.0 - psutil==7.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-timeout==2.1.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/joblib
[ "joblib/test/test_parallel.py::test_cpu_count", "joblib/test/test_parallel.py::test_effective_n_jobs", "joblib/test/test_parallel.py::test_simple_parallel[2-1-None]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-loky]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-threading]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[2-1-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-None]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-loky]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-threading]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[2-2-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-None]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-loky]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-threading]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-backend6]", 
"joblib/test/test_parallel.py::test_simple_parallel[2--1-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[2--1-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-None]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-loky]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-threading]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[2--2-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-None]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-loky]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-threading]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[11-1-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-None]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-loky]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-threading]", 
"joblib/test/test_parallel.py::test_simple_parallel[11-2-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[11-2-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-None]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-loky]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-threading]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[11--1-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-None]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-loky]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-threading]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[11--2-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-None]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-loky]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-multiprocessing]", 
"joblib/test/test_parallel.py::test_simple_parallel[100-1-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-threading]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[100-1-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-None]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-loky]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-threading]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[100-2-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-None]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-loky]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-threading]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[100--1-backend9]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-None]", 
"joblib/test/test_parallel.py::test_simple_parallel[100--2-loky]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-multiprocessing]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-sequential]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-threading]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-backend5]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-backend6]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-backend7]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-backend8]", "joblib/test/test_parallel.py::test_simple_parallel[100--2-backend9]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[None]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[loky]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[multiprocessing]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[sequential]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[threading]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[backend5]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[backend6]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[backend7]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[backend8]", "joblib/test/test_parallel.py::test_main_thread_renamed_no_warning[backend9]", "joblib/test/test_parallel.py::test_nested_parallel_warnings[loky-multiprocessing-True]", "joblib/test/test_parallel.py::test_nested_parallel_warnings[loky-loky-False]", "joblib/test/test_parallel.py::test_nested_parallel_warnings[multiprocessing-multiprocessing-True]", "joblib/test/test_parallel.py::test_nested_parallel_warnings[multiprocessing-loky-True]", "joblib/test/test_parallel.py::test_nested_parallel_warnings[threading-multiprocessing-True]", "joblib/test/test_parallel.py::test_nested_parallel_warnings[threading-loky-True]", 
"joblib/test/test_parallel.py::test_nested_loop[multiprocessing-multiprocessing]", "joblib/test/test_parallel.py::test_nested_loop[multiprocessing-threading]", "joblib/test/test_parallel.py::test_nested_loop[multiprocessing-sequential]", "joblib/test/test_parallel.py::test_nested_loop[multiprocessing-loky]", "joblib/test/test_parallel.py::test_nested_loop[threading-multiprocessing]", "joblib/test/test_parallel.py::test_nested_loop[threading-threading]", "joblib/test/test_parallel.py::test_nested_loop[threading-sequential]", "joblib/test/test_parallel.py::test_nested_loop[threading-loky]", "joblib/test/test_parallel.py::test_nested_loop[sequential-multiprocessing]", "joblib/test/test_parallel.py::test_nested_loop[sequential-threading]", "joblib/test/test_parallel.py::test_nested_loop[sequential-sequential]", "joblib/test/test_parallel.py::test_nested_loop[sequential-loky]", "joblib/test/test_parallel.py::test_nested_loop[loky-multiprocessing]", "joblib/test/test_parallel.py::test_nested_loop[loky-threading]", "joblib/test/test_parallel.py::test_nested_loop[loky-sequential]", "joblib/test/test_parallel.py::test_nested_loop[loky-loky]", "joblib/test/test_parallel.py::test_mutate_input_with_threads", "joblib/test/test_parallel.py::test_parallel_kwargs[1]", "joblib/test/test_parallel.py::test_parallel_kwargs[2]", "joblib/test/test_parallel.py::test_parallel_kwargs[3]", "joblib/test/test_parallel.py::test_parallel_as_context_manager[multiprocessing]", "joblib/test/test_parallel.py::test_parallel_as_context_manager[loky]", "joblib/test/test_parallel.py::test_parallel_as_context_manager[threading]", "joblib/test/test_parallel.py::test_parallel_pickling", "joblib/test/test_parallel.py::test_parallel_timeout_success[multiprocessing]", "joblib/test/test_parallel.py::test_parallel_timeout_success[loky]", "joblib/test/test_parallel.py::test_parallel_timeout_success[threading]", "joblib/test/test_parallel.py::test_parallel_timeout_fail[multiprocessing]", 
"joblib/test/test_parallel.py::test_parallel_timeout_fail[loky]", "joblib/test/test_parallel.py::test_parallel_timeout_fail[threading]", "joblib/test/test_parallel.py::test_error_capture[multiprocessing]", "joblib/test/test_parallel.py::test_error_capture[loky]", "joblib/test/test_parallel.py::test_dispatch_one_job[1-expected_queue0-multiprocessing]", "joblib/test/test_parallel.py::test_dispatch_one_job[1-expected_queue0-threading]", "joblib/test/test_parallel.py::test_dispatch_one_job[1-expected_queue0-sequential]", "joblib/test/test_parallel.py::test_dispatch_one_job[1-expected_queue0-loky]", "joblib/test/test_parallel.py::test_dispatch_one_job[4-expected_queue1-multiprocessing]", "joblib/test/test_parallel.py::test_dispatch_one_job[4-expected_queue1-threading]", "joblib/test/test_parallel.py::test_dispatch_one_job[4-expected_queue1-sequential]", "joblib/test/test_parallel.py::test_dispatch_one_job[4-expected_queue1-loky]", "joblib/test/test_parallel.py::test_dispatch_multiprocessing[multiprocessing]", "joblib/test/test_parallel.py::test_dispatch_multiprocessing[loky]", "joblib/test/test_parallel.py::test_dispatch_multiprocessing[threading]", "joblib/test/test_parallel.py::test_batching_auto_threading", "joblib/test/test_parallel.py::test_batching_auto_subprocesses[multiprocessing]", "joblib/test/test_parallel.py::test_batching_auto_subprocesses[loky]", "joblib/test/test_parallel.py::test_exception_dispatch", "joblib/test/test_parallel.py::test_nested_exception_dispatch[loky]", "joblib/test/test_parallel.py::test_nested_exception_dispatch[multiprocessing]", "joblib/test/test_parallel.py::test_nested_exception_dispatch[threading]", "joblib/test/test_parallel.py::test_multiple_spawning", "joblib/test/test_parallel.py::test_invalid_backend", "joblib/test/test_parallel.py::test_invalid_njobs[None]", "joblib/test/test_parallel.py::test_invalid_njobs[loky]", "joblib/test/test_parallel.py::test_invalid_njobs[multiprocessing]", 
"joblib/test/test_parallel.py::test_invalid_njobs[sequential]", "joblib/test/test_parallel.py::test_invalid_njobs[threading]", "joblib/test/test_parallel.py::test_invalid_njobs[backend5]", "joblib/test/test_parallel.py::test_invalid_njobs[backend6]", "joblib/test/test_parallel.py::test_invalid_njobs[backend7]", "joblib/test/test_parallel.py::test_invalid_njobs[backend8]", "joblib/test/test_parallel.py::test_invalid_njobs[backend9]", "joblib/test/test_parallel.py::test_register_parallel_backend", "joblib/test/test_parallel.py::test_overwrite_default_backend", "joblib/test/test_parallel.py::test_backend_context_manager[multiprocessing]", "joblib/test/test_parallel.py::test_backend_context_manager[loky]", "joblib/test/test_parallel.py::test_backend_context_manager[threading]", "joblib/test/test_parallel.py::test_backend_context_manager[test_backend_0]", "joblib/test/test_parallel.py::test_backend_context_manager[test_backend_1]", "joblib/test/test_parallel.py::test_backend_context_manager[test_backend_2]", "joblib/test/test_parallel.py::test_parameterized_backend_context_manager", "joblib/test/test_parallel.py::test_direct_parameterized_backend_context_manager", "joblib/test/test_parallel.py::test_nested_backend_context_manager", "joblib/test/test_parallel.py::test_retrieval_context", "joblib/test/test_parallel.py::test_joblib_exception", "joblib/test/test_parallel.py::test_safe_function", "joblib/test/test_parallel.py::test_invalid_batch_size[0]", "joblib/test/test_parallel.py::test_invalid_batch_size[-1]", "joblib/test/test_parallel.py::test_invalid_batch_size[1.42]", "joblib/test/test_parallel.py::test_dispatch_race_condition[2-2-all-auto]", "joblib/test/test_parallel.py::test_dispatch_race_condition[2-2-n_jobs-auto]", "joblib/test/test_parallel.py::test_dispatch_race_condition[10-2-n_jobs-auto0]", "joblib/test/test_parallel.py::test_dispatch_race_condition[517-2-n_jobs-auto]", "joblib/test/test_parallel.py::test_dispatch_race_condition[10-2-n_jobs-auto1]", 
"joblib/test/test_parallel.py::test_dispatch_race_condition[10-4-n_jobs-auto]", "joblib/test/test_parallel.py::test_dispatch_race_condition[200-12-n_jobs-auto]", "joblib/test/test_parallel.py::test_dispatch_race_condition[25-12-2", "joblib/test/test_parallel.py::test_dispatch_race_condition[250-12-all-1]", "joblib/test/test_parallel.py::test_dispatch_race_condition[250-12-2", "joblib/test/test_parallel.py::test_dispatch_race_condition[200-12-2", "joblib/test/test_parallel.py::test_default_mp_context", "joblib/test/test_parallel.py::test_parallel_with_interactively_defined_functions[multiprocessing]", "joblib/test/test_parallel.py::test_parallel_with_interactively_defined_functions[loky]", "joblib/test/test_parallel.py::test_parallel_with_interactively_defined_functions[spawn]", "joblib/test/test_parallel.py::test_parallel_with_unpicklable_functions_in_args[delayed-def", "joblib/test/test_parallel.py::test_parallel_with_unpicklable_functions_in_args[delayed-square", "joblib/test/test_parallel.py::test_parallel_with_unpicklable_functions_in_args[args-def", "joblib/test/test_parallel.py::test_parallel_with_unpicklable_functions_in_args[args-square", "joblib/test/test_parallel.py::test_parallel_with_unpicklable_functions_in_args[kwargs-def", "joblib/test/test_parallel.py::test_parallel_with_unpicklable_functions_in_args[kwargs-square", "joblib/test/test_parallel.py::test_parallel_with_interactively_defined_functions_default_backend", "joblib/test/test_parallel.py::test_parallel_with_exhausted_iterator", "joblib/test/test_parallel.py::test_warning_about_timeout_not_supported_by_backend", "joblib/test/test_parallel.py::test_abort_backend[1-None]", "joblib/test/test_parallel.py::test_abort_backend[1-loky]", "joblib/test/test_parallel.py::test_abort_backend[1-multiprocessing]", "joblib/test/test_parallel.py::test_abort_backend[1-sequential]", "joblib/test/test_parallel.py::test_abort_backend[1-threading]", "joblib/test/test_parallel.py::test_abort_backend[1-backend5]", 
"joblib/test/test_parallel.py::test_abort_backend[1-backend6]", "joblib/test/test_parallel.py::test_abort_backend[1-backend7]", "joblib/test/test_parallel.py::test_abort_backend[1-backend8]", "joblib/test/test_parallel.py::test_abort_backend[1-backend9]", "joblib/test/test_parallel.py::test_abort_backend[2-None]", "joblib/test/test_parallel.py::test_abort_backend[2-loky]", "joblib/test/test_parallel.py::test_abort_backend[2-multiprocessing]", "joblib/test/test_parallel.py::test_abort_backend[2-sequential]", "joblib/test/test_parallel.py::test_abort_backend[2-threading]", "joblib/test/test_parallel.py::test_abort_backend[2-backend5]", "joblib/test/test_parallel.py::test_abort_backend[2-backend6]", "joblib/test/test_parallel.py::test_abort_backend[2-backend7]", "joblib/test/test_parallel.py::test_abort_backend[2-backend8]", "joblib/test/test_parallel.py::test_abort_backend[2-backend9]", "joblib/test/test_parallel.py::test_abort_backend[-2-None]", "joblib/test/test_parallel.py::test_abort_backend[-2-loky]", "joblib/test/test_parallel.py::test_abort_backend[-2-multiprocessing]", "joblib/test/test_parallel.py::test_abort_backend[-2-sequential]", "joblib/test/test_parallel.py::test_abort_backend[-2-threading]", "joblib/test/test_parallel.py::test_abort_backend[-2-backend5]", "joblib/test/test_parallel.py::test_abort_backend[-2-backend6]", "joblib/test/test_parallel.py::test_abort_backend[-2-backend7]", "joblib/test/test_parallel.py::test_abort_backend[-2-backend8]", "joblib/test/test_parallel.py::test_abort_backend[-2-backend9]", "joblib/test/test_parallel.py::test_abort_backend[-1-None]", "joblib/test/test_parallel.py::test_abort_backend[-1-loky]", "joblib/test/test_parallel.py::test_abort_backend[-1-multiprocessing]", "joblib/test/test_parallel.py::test_abort_backend[-1-sequential]", "joblib/test/test_parallel.py::test_abort_backend[-1-threading]", "joblib/test/test_parallel.py::test_abort_backend[-1-backend5]", 
"joblib/test/test_parallel.py::test_abort_backend[-1-backend6]", "joblib/test/test_parallel.py::test_abort_backend[-1-backend7]", "joblib/test/test_parallel.py::test_abort_backend[-1-backend8]", "joblib/test/test_parallel.py::test_abort_backend[-1-backend9]", "joblib/test/test_parallel.py::test_lambda_expression", "joblib/test/test_parallel.py::test_delayed_check_pickle_deprecated", "joblib/test/test_parallel.py::test_backend_batch_statistics_reset[multiprocessing]", "joblib/test/test_parallel.py::test_backend_batch_statistics_reset[loky]", "joblib/test/test_parallel.py::test_backend_hinting_and_constraints", "joblib/test/test_parallel.py::test_backend_hinting_and_constraints_with_custom_backends", "joblib/test/test_parallel.py::test_invalid_backend_hinting_and_constraints", "joblib/test/test_parallel.py::test_global_parallel_backend", "joblib/test/test_parallel.py::test_external_backends", "joblib/test/test_parallel.py::test_nested_parallelism_limit[loky]", "joblib/test/test_parallel.py::test_nested_parallelism_limit[threading]", "joblib/test/test_parallel.py::test_thread_bomb_mitigation[loky]", "joblib/test/test_parallel.py::test_thread_bomb_mitigation[threading]" ]
[]
[]
[]
BSD 3-Clause "New" or "Revised" License
2,826
[ "joblib/_dask.py", "continuous_integration/appveyor/requirements.txt", ".travis.yml", "continuous_integration/travis/install.sh", "joblib/parallel.py", "setup.cfg", "examples/compressors_comparison.py" ]
[ "joblib/_dask.py", "continuous_integration/appveyor/requirements.txt", ".travis.yml", "continuous_integration/travis/install.sh", "joblib/parallel.py", "setup.cfg", "examples/compressors_comparison.py" ]
elastic__rally-540
f2557f8476f7ba34c5def1c0126218f19266dee1
2018-07-25 14:29:36
799e0642c27a0067931f305359a615cbf9fe2e20
diff --git a/esrally/mechanic/telemetry.py b/esrally/mechanic/telemetry.py index 70166f13..d6c13a1a 100644 --- a/esrally/mechanic/telemetry.py +++ b/esrally/mechanic/telemetry.py @@ -483,6 +483,7 @@ class NodeStatsRecorder: for node_stats in current_sample: node_name = node_stats["name"] collected_node_stats = collections.OrderedDict() + collected_node_stats["name"] = "node-stats" if self.include_indices: collected_node_stats.update(
Simplify filtering node-stats-related documents in Kibana The current structure of node-stats documents does not allow for easy filtering in Kibana because we don't have an explicit property in the documents that allow identify them. Hence, we should add a property (e.g. `"name": "node-stats"`) to clearly indicate that.
elastic/rally
diff --git a/tests/mechanic/telemetry_test.py b/tests/mechanic/telemetry_test.py index 687fda21..17cb2c4c 100644 --- a/tests/mechanic/telemetry_test.py +++ b/tests/mechanic/telemetry_test.py @@ -680,7 +680,11 @@ class NodeStatsRecorderTests(TestCase): recorder = telemetry.NodeStatsRecorder(telemetry_params, cluster_name="remote", client=client, metrics_store=metrics_store) recorder.record() - metrics_store_put_doc.assert_called_once_with(NodeStatsRecorderTests.default_stats_response_flattened, + expected_doc = collections.OrderedDict() + expected_doc["name"] = "node-stats" + expected_doc.update(NodeStatsRecorderTests.default_stats_response_flattened) + + metrics_store_put_doc.assert_called_once_with(expected_doc, level=MetaInfoScope.node, node_name="rally0", meta_data=metrics_store_meta_data) @@ -864,7 +868,8 @@ class NodeStatsRecorderTests(TestCase): recorder.record() metrics_store_put_doc.assert_called_once_with( - {"indices_docs_count": 76892364, + {"name": "node-stats", + "indices_docs_count": 76892364, "indices_docs_deleted": 324530, "indices_fielddata_evictions": 17, "indices_fielddata_memory_size_in_bytes": 6936,
{ "commit_name": "merge_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-benchmark" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 elasticsearch==6.2.0 -e git+https://github.com/elastic/rally.git@f2557f8476f7ba34c5def1c0126218f19266dee1#egg=esrally importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==2.9.5 jsonschema==2.5.1 MarkupSafe==2.0.1 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==5.4.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work py-cpuinfo==3.2.0 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-benchmark==3.4.1 tabulate==0.8.1 thespian==3.9.2 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.22 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: rally channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - elasticsearch==6.2.0 - jinja2==2.9.5 - jsonschema==2.5.1 - markupsafe==2.0.1 - psutil==5.4.0 - py-cpuinfo==3.2.0 - pytest-benchmark==3.4.1 - tabulate==0.8.1 - thespian==3.9.2 - urllib3==1.22 prefix: /opt/conda/envs/rally
[ "tests/mechanic/telemetry_test.py::NodeStatsRecorderTests::test_stores_all_nodes_stats", "tests/mechanic/telemetry_test.py::NodeStatsRecorderTests::test_stores_default_nodes_stats" ]
[]
[ "tests/mechanic/telemetry_test.py::TelemetryTests::test_merges_options_set_by_different_devices", "tests/mechanic/telemetry_test.py::StartupTimeTests::test_store_calculated_metrics", "tests/mechanic/telemetry_test.py::MergePartsDeviceTests::test_store_calculated_metrics", "tests/mechanic/telemetry_test.py::MergePartsDeviceTests::test_store_nothing_if_no_metrics_present", "tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_java_9_or_above_custom_recording_template", "tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_java_9_or_above_default_recording_template", "tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_pre_java_9_custom_recording_template", "tests/mechanic/telemetry_test.py::JfrTests::test_sets_options_for_pre_java_9_default_recording_template", "tests/mechanic/telemetry_test.py::GcTests::test_sets_options_for_java_9_or_above", "tests/mechanic/telemetry_test.py::GcTests::test_sets_options_for_pre_java_9", "tests/mechanic/telemetry_test.py::CcrStatsTests::test_negative_sample_interval_forbidden", "tests/mechanic/telemetry_test.py::CcrStatsTests::test_wrong_cluster_name_in_ccr_stats_indices_forbidden", "tests/mechanic/telemetry_test.py::CcrStatsRecorderTests::test_raises_exception_on_transport_error", "tests/mechanic/telemetry_test.py::CcrStatsRecorderTests::test_stores_default_ccr_stats", "tests/mechanic/telemetry_test.py::CcrStatsRecorderTests::test_stores_default_ccr_stats_many_shards", "tests/mechanic/telemetry_test.py::CcrStatsRecorderTests::test_stores_filtered_ccr_stats", "tests/mechanic/telemetry_test.py::NodeStatsRecorderTests::test_flatten_indices_fields", "tests/mechanic/telemetry_test.py::NodeStatsRecorderTests::test_negative_sample_interval_forbidden", "tests/mechanic/telemetry_test.py::ClusterEnvironmentInfoTests::test_stores_cluster_level_metrics_on_attach", "tests/mechanic/telemetry_test.py::NodeEnvironmentInfoTests::test_stores_node_level_metrics_on_attach", 
"tests/mechanic/telemetry_test.py::ExternalEnvironmentInfoTests::test_fallback_when_host_not_available", "tests/mechanic/telemetry_test.py::ExternalEnvironmentInfoTests::test_stores_all_node_metrics_on_attach", "tests/mechanic/telemetry_test.py::ClusterMetaDataInfoTests::test_enriches_cluster_nodes_for_elasticsearch_1_x", "tests/mechanic/telemetry_test.py::ClusterMetaDataInfoTests::test_enriches_cluster_nodes_for_elasticsearch_after_1_x", "tests/mechanic/telemetry_test.py::GcTimesSummaryTests::test_stores_only_diff_of_gc_times", "tests/mechanic/telemetry_test.py::IndexStatsTests::test_index_stats_are_per_lap", "tests/mechanic/telemetry_test.py::IndexStatsTests::test_stores_available_index_stats", "tests/mechanic/telemetry_test.py::IndexSizeTests::test_stores_index_size_for_data_paths", "tests/mechanic/telemetry_test.py::IndexSizeTests::test_stores_nothing_if_no_data_path" ]
[]
Apache License 2.0
2,827
[ "esrally/mechanic/telemetry.py" ]
[ "esrally/mechanic/telemetry.py" ]
rabitt__pysox-75
0c87e45ab170b6e08e5ebacc9328c6053db95710
2018-07-25 15:32:07
8a6748d32b6917d5ef920895fbfc734dda21f294
coveralls: [![Coverage Status](https://coveralls.io/builds/18160988/badge)](https://coveralls.io/builds/18160988) Coverage increased (+0.005%) to 98.759% when pulling **b74ac52f02d9cbf04a5e7c1c297039d823060ec7 on lostanlen:patch-1** into **f5671ebe3e5d83277833a0a768a422e9e14a4b0d on rabitt:master**. lostanlen: Had an offline conversation with `rabitt` and she asked me to convert the `bitrate` field into an Python integer, expressed in bits per second. I'm going to write a simple-minded string parser in `file_info.bitrate` to strip the `k` suffix rabitt: @lostanlen thanks for catching this, and for the PR! Before I merge, could you please squash your commits down to one? lostanlen: Tests are passing, this is ready for CR. This PR have important breaking changes, so it would be best to schedule if for `v1.4` lostanlen: I implemented the fix to issue #78 Tests are passing. Coverage decreases a little bit because: 1. I don't know how to produce a "zero bit depth" audio file 2. I don't know how to produce an audio file with less than 1kbps bit rate Some help for 1 and/or 2 would be greatly appreciated. @rabitt This is ready for a new round of CR rabitt: > Tests are passing. Coverage decreases a little bit because: > > 1. I don't know how to produce a "zero bit depth" audio file >2. I don't know how to produce an audio file with less than 1kbps bit rate > > Some help for 1 and/or 2 would be greatly appreciated. Just spent some time trying to create "valid" (files SoX knows how to read) files with these stats, and so far I haven't found a way to do it either. The obvious degenerate cases (e.g. a truly empty file with an audio file extension) can't be read in the first place and creates a higher level SoX error. And an 0-length audio file with a header readable by SoX has bit depth information that is nonzero. If anyone else has ideas how to hit these cases, help would be welcome! 
Otherwise, I'd say we ignore these new uncovered lines and merge despite the decreased coverage. lostanlen: Rebased. Commits squashed into two (one for issue #68, one for issue #78). I might have goofed up with the source tree; do tell me in case I did rabitt: @lostanlen I don't see the rebased commits - just the original set. Did you `--force` push?
diff --git a/README.md b/README.md index 3325a5b..7fe349d 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Python wrapper around sox. [Read the Docs here](http://pysox.readthedocs.org). [![PyPI version](https://badge.fury.io/py/sox.svg)](https://badge.fury.io/py/sox) [![Documentation Status](https://readthedocs.org/projects/resampy/badge/?version=latest)](http://pysox.readthedocs.io/en/latest/?badge=latest) -[![GitHub license](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://raw.githubusercontent.com/rabitt/pysox/master/LICENSE.md) +[![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/rabitt/pysox/master/LICENSE.md) [![PyPI](https://img.shields.io/pypi/pyversions/Django.svg?maxAge=2592000)]() [![Build Status](https://travis-ci.org/rabitt/pysox.svg?branch=master)](https://travis-ci.org/rabitt/pysox) @@ -67,7 +67,7 @@ nosetests . ```python import sox -# create transformer +# create trasnformer tfm = sox.Transformer() # trim the audio between 5 and 10.5 seconds. tfm.trim(5, 10.5) diff --git a/setup.py b/setup.py index f6ed177..8fc4c8b 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ if __name__ == "__main__": package_data={'sox': []}, long_description="""Python wrapper around SoX.""", keywords='audio effects SoX', - license='BSD-3-Clause', + license='MIT', install_requires=[ ], extras_require={ diff --git a/sox/core.py b/sox/core.py index 118cd4a..178fbb9 100644 --- a/sox/core.py +++ b/sox/core.py @@ -6,7 +6,7 @@ from subprocess import CalledProcessError from . 
import NO_SOX -SOXI_ARGS = ['b', 'c', 'a', 'D', 'e', 't', 's', 'r'] +SOXI_ARGS = ['B', 'b', 'c', 'a', 'D', 'e', 't', 's', 'r'] ENCODING_VALS = [ 'signed-integer', 'unsigned-integer', 'floating-point', 'a-law', 'u-law', diff --git a/sox/file_info.py b/sox/file_info.py index 9077f1a..d4a373d 100644 --- a/sox/file_info.py +++ b/sox/file_info.py @@ -10,9 +10,9 @@ from .core import sox from .core import enquote_filepath -def bitrate(input_filepath): +def bitdepth(input_filepath): ''' - Number of bits per sample (0 if not applicable). + Number of bits per sample, or None if not applicable. Parameters ---------- @@ -21,17 +21,50 @@ def bitrate(input_filepath): Returns ------- - bitrate : int - number of bits per sample - returns 0 if not applicable + bitdepth : int or None + Number of bits per sample. + Returns None if not applicable. ''' + validate_input_file(input_filepath) output = soxi(input_filepath, 'b') if output == '0': - logger.warning("Bitrate unavailable for %s", input_filepath) + logger.warning("Bit depth unavailable for %s", input_filepath) + return None return int(output) +def bitrate(input_filepath): + ''' + Bit rate averaged over the whole file. + Expressed in bytes per second (bps), or None if not applicable. + + Parameters + ---------- + input_filepath : str + Path to audio file. + + Returns + ------- + bitrate : float or None + Bit rate, expressed in bytes per second. + Returns None if not applicable. + ''' + + validate_input_file(input_filepath) + output = soxi(input_filepath, 'B') + # The characters below stand for kilo, Mega, Giga, etc. + greek_prefixes = '\0kMGTPEZY' + if output == "0": + logger.warning("Bit rate unavailable for %s", input_filepath) + return None + elif output[-1] in greek_prefixes: + multiplier = 1000.0**(greek_prefixes.index(output[-1])) + return float(output[:-1])*multiplier + else: + return float(output[:-1]) + + def channels(input_filepath): ''' Show number of channels. 
@@ -73,7 +106,7 @@ def comments(input_filepath): def duration(input_filepath): ''' - Show duration in seconds (0 if unavailable). + Show duration in seconds, or None if not available. Parameters ---------- @@ -82,15 +115,15 @@ def duration(input_filepath): Returns ------- - duration : float + duration : float or None Duration of audio file in seconds. - If unavailable or empty, returns 0. + If unavailable or empty, returns None. ''' validate_input_file(input_filepath) output = soxi(input_filepath, 'D') - if output == '0': + if float(output) == 0.0: logger.warning("Duration unavailable for %s", input_filepath) - + return None return float(output) @@ -134,7 +167,7 @@ def file_type(input_filepath): def num_samples(input_filepath): ''' - Show number of samples (0 if unavailable). + Show number of samples, or None if unavailable. Parameters ---------- @@ -143,14 +176,15 @@ def num_samples(input_filepath): Returns ------- - n_samples : int + n_samples : int or None total number of samples in audio file. - Returns 0 if empty or unavailable + Returns None if empty or unavailable. ''' validate_input_file(input_filepath) output = soxi(input_filepath, 's') if output == '0': logger.warning("Number of samples unavailable for %s", input_filepath) + return None return int(output) @@ -313,6 +347,7 @@ def info(filepath): Dictionary of file information. Fields are: * channels * sample_rate + * bitdepth * bitrate * duration * num_samples @@ -322,6 +357,7 @@ def info(filepath): info_dictionary = { 'channels': channels(filepath), 'sample_rate': sample_rate(filepath), + 'bitdepth': bitdepth(filepath), 'bitrate': bitrate(filepath), 'duration': duration(filepath), 'num_samples': num_samples(filepath), diff --git a/sox/transform.py b/sox/transform.py index df4609e..3dd9a1c 100644 --- a/sox/transform.py +++ b/sox/transform.py @@ -2682,7 +2682,7 @@ class Transformer(object): Returns ------- power_spectrum : list - List of frequency (Hz), amplitude pairs. 
+ List of frequency (Hz), amplitdue pairs. See Also -------- @@ -2725,7 +2725,7 @@ class Transformer(object): Returns ------- stats_dict : dict - List of frequency (Hz), amplitude pairs. + List of frequency (Hz), amplitdue pairs. See Also -------- diff --git a/sox/version.py b/sox/version.py index ad0d588..22a6515 100644 --- a/sox/version.py +++ b/sox/version.py @@ -3,4 +3,4 @@ """Version info""" short_version = '1.3' -version = '1.3.5' +version = '1.3.4'
Is `file_info.bitrate` correct? Hello This function seems to be returning the bit depth (aka bits per sample) not the bit rate. `soxi -b` is the bit depth. `soxi -B` is the bitrate. Happy to submit a pull request with a fix and a second function for the bit depth. Cheers.
rabitt/pysox
diff --git a/tests/test_file_info.py b/tests/test_file_info.py index cca028f..d960434 100644 --- a/tests/test_file_info.py +++ b/tests/test_file_info.py @@ -22,19 +22,40 @@ class TestBitrate(unittest.TestCase): def test_wav(self): actual = file_info.bitrate(INPUT_FILE) - expected = 16 + expected = 706000.0 self.assertEqual(expected, actual) def test_aiff(self): actual = file_info.bitrate(INPUT_FILE2) - expected = 32 + expected = 768000.0 self.assertEqual(expected, actual) def test_empty(self): actual = file_info.bitrate(EMPTY_FILE) + expected = None + +class TestBitdepth(unittest.TestCase): + + def test_wav(self): + actual = file_info.bitdepth(INPUT_FILE) + expected = 16 + self.assertEqual(expected, actual) + + def test_aiff(self): + actual = file_info.bitdepth(INPUT_FILE2) + expected = 32 + self.assertEqual(expected, actual) + + def test_empty(self): + actual = file_info.bitdepth(INPUT_FILE) expected = 16 self.assertEqual(expected, actual) + def test_aiff(self): + actual = file_info.bitdepth(INPUT_FILE2) + expected = 32 + self.assertEqual(expected, actual) + class TestChannels(unittest.TestCase): @@ -91,7 +112,7 @@ class TestDuration(unittest.TestCase): def test_empty(self): actual = file_info.duration(EMPTY_FILE) - expected = 0 + expected = None self.assertEqual(expected, actual) @@ -145,7 +166,7 @@ class TestNumSamples(unittest.TestCase): def test_empty(self): actual = file_info.num_samples(EMPTY_FILE) - expected = 0 + expected = None self.assertEqual(expected, actual) @@ -220,7 +241,8 @@ class TestInfo(unittest.TestCase): expected = { 'channels': 1, 'sample_rate': 44100.0, - 'bitrate': 16, + 'bitdepth': 16, + 'bitrate': 706000.0, 'duration': 10.0, 'num_samples': 441000, 'encoding': 'Signed Integer PCM',
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 6 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[tests]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y sox gcc" ], "python": "3.9", "reqs_path": [ "docs/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 docutils==0.21.2 exceptiongroup==1.2.2 execnet==2.1.1 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 numpydoc==1.8.0 packaging==24.2 pep8==1.7.1 pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 pytest-cache==1.0 pytest-cov==6.0.0 pytest-pep8==1.0.6 requests==2.32.3 snowballstemmer==2.2.0 -e git+https://github.com/rabitt/pysox.git@0c87e45ab170b6e08e5ebacc9328c6053db95710#egg=sox Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tabulate==0.9.0 tomli==2.2.1 urllib3==2.3.0 zipp==3.21.0
name: pysox channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - docutils==0.21.2 - exceptiongroup==1.2.2 - execnet==2.1.1 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - numpydoc==1.8.0 - packaging==24.2 - pep8==1.7.1 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - pytest-cache==1.0 - pytest-cov==6.0.0 - pytest-pep8==1.0.6 - requests==2.32.3 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tabulate==0.9.0 - tomli==2.2.1 - urllib3==2.3.0 - zipp==3.21.0 prefix: /opt/conda/envs/pysox
[ "tests/test_file_info.py::TestBitrate::test_aiff", "tests/test_file_info.py::TestBitrate::test_wav", "tests/test_file_info.py::TestBitdepth::test_aiff", "tests/test_file_info.py::TestBitdepth::test_empty", "tests/test_file_info.py::TestBitdepth::test_wav", "tests/test_file_info.py::TestDuration::test_empty", "tests/test_file_info.py::TestNumSamples::test_empty", "tests/test_file_info.py::TestInfo::test_dictionary" ]
[]
[ "tests/test_file_info.py::TestBitrate::test_empty", "tests/test_file_info.py::TestChannels::test_aiff", "tests/test_file_info.py::TestChannels::test_empty", "tests/test_file_info.py::TestChannels::test_wav", "tests/test_file_info.py::TestComments::test_aiff", "tests/test_file_info.py::TestComments::test_empty", "tests/test_file_info.py::TestComments::test_wav", "tests/test_file_info.py::TestDuration::test_aiff", "tests/test_file_info.py::TestDuration::test_spacey_wav", "tests/test_file_info.py::TestDuration::test_wav", "tests/test_file_info.py::TestEncoding::test_aiff", "tests/test_file_info.py::TestEncoding::test_empty", "tests/test_file_info.py::TestEncoding::test_wav", "tests/test_file_info.py::TestFileType::test_aiff", "tests/test_file_info.py::TestFileType::test_empty", "tests/test_file_info.py::TestFileType::test_wav", "tests/test_file_info.py::TestNumSamples::test_aiff", "tests/test_file_info.py::TestNumSamples::test_wav", "tests/test_file_info.py::TestSampleRate::test_aiff", "tests/test_file_info.py::TestSampleRate::test_empty", "tests/test_file_info.py::TestSampleRate::test_wav", "tests/test_file_info.py::TestSilent::test_empty", "tests/test_file_info.py::TestSilent::test_nonsilent", "tests/test_file_info.py::TestSilent::test_silent", "tests/test_file_info.py::TestFileExtension::test_ext1", "tests/test_file_info.py::TestFileExtension::test_ext2", "tests/test_file_info.py::TestFileExtension::test_ext3", "tests/test_file_info.py::TestFileExtension::test_ext4", "tests/test_file_info.py::TestFileExtension::test_ext5", "tests/test_file_info.py::TestValidateInputFile::test_invalid_format", "tests/test_file_info.py::TestValidateInputFile::test_nonexistent", "tests/test_file_info.py::TestValidateInputFile::test_valid", "tests/test_file_info.py::TestValidateInputFile::test_valid_wspaces", "tests/test_file_info.py::TestValidateInputFileList::test_empty_list", "tests/test_file_info.py::TestValidateInputFileList::test_invalid_format", 
"tests/test_file_info.py::TestValidateInputFileList::test_len_one_list", "tests/test_file_info.py::TestValidateInputFileList::test_nonexistent", "tests/test_file_info.py::TestValidateInputFileList::test_nonlist", "tests/test_file_info.py::TestValidateInputFileList::test_valid", "tests/test_file_info.py::TestValidateOutputFile::test_file_exists", "tests/test_file_info.py::TestValidateOutputFile::test_invalid_format", "tests/test_file_info.py::TestValidateOutputFile::test_not_writeable", "tests/test_file_info.py::TestValidateOutputFile::test_valid", "tests/test_file_info.py::TestStat::test_silent_file", "tests/test_file_info.py::TestStatCall::test_stat_call", "tests/test_file_info.py::TestParseStat::test_empty", "tests/test_file_info.py::TestParseStat::test_real_output", "tests/test_file_info.py::TestParseStat::test_simple" ]
[]
BSD 3-Clause "New" or "Revised" License
2,828
[ "setup.py", "sox/transform.py", "sox/core.py", "README.md", "sox/version.py", "sox/file_info.py" ]
[ "setup.py", "sox/transform.py", "sox/core.py", "README.md", "sox/version.py", "sox/file_info.py" ]
simonw__datasette-349
3ac21c749881d0fb1c35b0f9b7a819e29f61c5c1
2018-07-26 05:03:22
e1db8194e8c1d7f361fd0c1c3fc1b91d6aa920e5
diff --git a/datasette/app.py b/datasette/app.py index 66a7573a..052131d0 100644 --- a/datasette/app.py +++ b/datasette/app.py @@ -2,6 +2,7 @@ import asyncio import click import collections import hashlib +import importlib import itertools import os import sqlite3 @@ -41,6 +42,11 @@ from .utils import ( from .inspect import inspect_hash, inspect_views, inspect_tables from .version import __version__ +default_plugins = ( + "datasette.publish.heroku", + "datasette.publish.now", +) + app_root = Path(__file__).parent.parent connections = threading.local() @@ -49,6 +55,11 @@ pm = pluggy.PluginManager("datasette") pm.add_hookspecs(hookspecs) pm.load_setuptools_entrypoints("datasette") +# Load default plugins +for plugin in default_plugins: + mod = importlib.import_module(plugin) + pm.register(mod, plugin) + ConfigOption = collections.namedtuple( "ConfigOption", ("name", "default", "help") diff --git a/datasette/cli.py b/datasette/cli.py index 72770326..820367ac 100644 --- a/datasette/cli.py +++ b/datasette/cli.py @@ -4,32 +4,17 @@ from click_default_group import DefaultGroup import json import os import shutil -from subprocess import call, check_output +from subprocess import call import sys -from .app import Datasette, DEFAULT_CONFIG, CONFIG_OPTIONS +from .app import Datasette, DEFAULT_CONFIG, CONFIG_OPTIONS, pm from .utils import ( temporary_docker_directory, - temporary_heroku_directory, value_as_boolean, + StaticMount, ValueAsBooleanError, ) -class StaticMount(click.ParamType): - name = "static mount" - - def convert(self, value, param, ctx): - if ":" not in value: - self.fail( - '"{}" should be of format mountpoint:directory'.format(value), - param, ctx - ) - path, dirpath = value.split(":") - if not os.path.exists(dirpath) or not os.path.isdir(dirpath): - self.fail("%s is not a valid directory path" % value, param, ctx) - return path, dirpath - - class Config(click.ParamType): name = "config" @@ -93,202 +78,14 @@ def inspect(files, inspect_file, sqlite_extensions): 
open(inspect_file, "w").write(json.dumps(app.inspect(), indent=2)) [email protected]() [email protected]("publisher", type=click.Choice(["now", "heroku"])) [email protected]("files", type=click.Path(exists=True), nargs=-1) [email protected]( - "-n", - "--name", - default="datasette", - help="Application name to use when deploying", -) [email protected]( - "-m", - "--metadata", - type=click.File(mode="r"), - help="Path to JSON file containing metadata to publish", -) [email protected]("--extra-options", help="Extra options to pass to datasette serve") [email protected]("--force", is_flag=True, help="Pass --force option to now") [email protected]("--branch", help="Install datasette from a GitHub branch e.g. master") [email protected]("--token", help="Auth token to use for deploy (Now only)") [email protected]( - "--template-dir", - type=click.Path(exists=True, file_okay=False, dir_okay=True), - help="Path to directory containing custom templates", -) [email protected]( - "--plugins-dir", - type=click.Path(exists=True, file_okay=False, dir_okay=True), - help="Path to directory containing custom plugins", -) [email protected]( - "--static", - type=StaticMount(), - help="mountpoint:path-to-directory for serving static files", - multiple=True, -) [email protected]( - "--install", - help="Additional packages (e.g. 
plugins) to install", - multiple=True, -) [email protected]( - "--spatialite", is_flag=True, help="Enable SpatialLite extension" -) [email protected]("--version-note", help="Additional note to show on /-/versions") [email protected]("--title", help="Title for metadata") [email protected]("--license", help="License label for metadata") [email protected]("--license_url", help="License URL for metadata") [email protected]("--source", help="Source label for metadata") [email protected]("--source_url", help="Source URL for metadata") -def publish( - publisher, - files, - name, - metadata, - extra_options, - force, - branch, - token, - template_dir, - plugins_dir, - static, - install, - spatialite, - version_note, - **extra_metadata -): - """ - Publish specified SQLite database files to the internet along with a datasette API. - - Options for PUBLISHER: - * 'now' - You must have Zeit Now installed: https://zeit.co/now - * 'heroku' - You must have Heroku installed: https://cli.heroku.com/ - - Example usage: datasette publish now my-database.db - """ - - def _fail_if_publish_binary_not_installed(binary, publish_target, install_link): - """Exit (with error message) if ``binary` isn't installed""" - if not shutil.which(binary): - click.secho( - "Publishing to {publish_target} requires {binary} to be installed and configured".format( - publish_target=publish_target, binary=binary - ), - bg="red", - fg="white", - bold=True, - err=True, - ) - click.echo( - "Follow the instructions at {install_link}".format( - install_link=install_link - ), - err=True, - ) - sys.exit(1) - - if publisher == "now": - _fail_if_publish_binary_not_installed("now", "Zeit Now", "https://zeit.co/now") - if extra_options: - extra_options += " " - else: - extra_options = "" - extra_options += "--config force_https_urls:on" - - with temporary_docker_directory( - files, - name, - metadata, - extra_options, - branch, - template_dir, - plugins_dir, - static, - install, - spatialite, - version_note, - 
extra_metadata, - ): - args = [] - if force: - args.append("--force") - if token: - args.append("--token={}".format(token)) - if args: - call(["now"] + args) - else: - call("now") - - elif publisher == "heroku": - _fail_if_publish_binary_not_installed( - "heroku", "Heroku", "https://cli.heroku.com" - ) - if spatialite: - click.secho( - "The --spatialite option is not yet supported for Heroku", - bg="red", - fg="white", - bold=True, - err=True, - ) - click.echo( - "See https://github.com/simonw/datasette/issues/301", - err=True, - ) - sys.exit(1) - - # Check for heroku-builds plugin - plugins = [ - line.split()[0] for line in check_output(["heroku", "plugins"]).splitlines() - ] - if b"heroku-builds" not in plugins: - click.echo( - "Publishing to Heroku requires the heroku-builds plugin to be installed." - ) - click.confirm( - "Install it? (this will run `heroku plugins:install heroku-builds`)", - abort=True, - ) - call(["heroku", "plugins:install", "heroku-builds"]) - - with temporary_heroku_directory( - files, - name, - metadata, - extra_options, - branch, - template_dir, - plugins_dir, - static, - install, - extra_metadata, - ): - - app_name = None - if name: - # Check to see if this app already exists - list_output = check_output(["heroku", "apps:list", "--json"]).decode('utf8') - apps = json.loads(list_output) [email protected]() +def publish(): + "Publish specified SQLite database files to the internet along with a Datasette-powered interface and API" + pass - for app in apps: - if app['name'] == name: - app_name = name - break - - if not app_name: - # Create a new app - cmd = ["heroku", "apps:create"] - if name: - cmd.append(name) - cmd.append("--json") - create_output = check_output(cmd).decode( - "utf8" - ) - app_name = json.loads(create_output)["name"] - call(["heroku", "builds:create", "-a", app_name]) +# Register publish plugins +pm.hook.publish_subcommand(publish=publish) @cli.command() diff --git a/datasette/hookspecs.py b/datasette/hookspecs.py index 
240b58db..9546eebf 100644 --- a/datasette/hookspecs.py +++ b/datasette/hookspecs.py @@ -23,3 +23,8 @@ def extra_css_urls(): @hookspec def extra_js_urls(): "Extra JavaScript URLs added by this plugin" + + +@hookspec +def publish_subcommand(publish): + "Subcommands for 'datasette publish'" diff --git a/datasette/publish/__init__.py b/datasette/publish/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/datasette/publish/common.py b/datasette/publish/common.py new file mode 100644 index 00000000..9dd2ae9e --- /dev/null +++ b/datasette/publish/common.py @@ -0,0 +1,68 @@ +from ..utils import StaticMount +import click +import shutil +import sys + + +def add_common_publish_arguments_and_options(subcommand): + for decorator in reversed(( + click.argument("files", type=click.Path(exists=True), nargs=-1), + click.option( + "-m", + "--metadata", + type=click.File(mode="r"), + help="Path to JSON file containing metadata to publish", + ), + click.option("--extra-options", help="Extra options to pass to datasette serve"), + click.option("--branch", help="Install datasette from a GitHub branch e.g. master"), + click.option( + "--template-dir", + type=click.Path(exists=True, file_okay=False, dir_okay=True), + help="Path to directory containing custom templates", + ), + click.option( + "--plugins-dir", + type=click.Path(exists=True, file_okay=False, dir_okay=True), + help="Path to directory containing custom plugins", + ), + click.option( + "--static", + type=StaticMount(), + help="mountpoint:path-to-directory for serving static files", + multiple=True, + ), + click.option( + "--install", + help="Additional packages (e.g. 
plugins) to install", + multiple=True, + ), + click.option("--version-note", help="Additional note to show on /-/versions"), + click.option("--title", help="Title for metadata"), + click.option("--license", help="License label for metadata"), + click.option("--license_url", help="License URL for metadata"), + click.option("--source", help="Source label for metadata"), + click.option("--source_url", help="Source URL for metadata"), + )): + subcommand = decorator(subcommand) + return subcommand + + +def fail_if_publish_binary_not_installed(binary, publish_target, install_link): + """Exit (with error message) if ``binary` isn't installed""" + if not shutil.which(binary): + click.secho( + "Publishing to {publish_target} requires {binary} to be installed and configured".format( + publish_target=publish_target, binary=binary + ), + bg="red", + fg="white", + bold=True, + err=True, + ) + click.echo( + "Follow the instructions at {install_link}".format( + install_link=install_link + ), + err=True, + ) + sys.exit(1) diff --git a/datasette/publish/heroku.py b/datasette/publish/heroku.py new file mode 100644 index 00000000..af53a37a --- /dev/null +++ b/datasette/publish/heroku.py @@ -0,0 +1,99 @@ +from datasette import hookimpl +import click +import json +from subprocess import call, check_output + +from .common import ( + add_common_publish_arguments_and_options, + fail_if_publish_binary_not_installed, +) +from ..utils import temporary_heroku_directory + + +@hookimpl +def publish_subcommand(publish): + @publish.command() + @add_common_publish_arguments_and_options + @click.option( + "-n", + "--name", + default="datasette", + help="Application name to use when deploying", + ) + def heroku( + files, + metadata, + extra_options, + branch, + template_dir, + plugins_dir, + static, + install, + version_note, + title, + license, + license_url, + source, + source_url, + name, + ): + fail_if_publish_binary_not_installed( + "heroku", "Heroku", "https://cli.heroku.com" + ) + + # Check 
for heroku-builds plugin + plugins = [ + line.split()[0] for line in check_output(["heroku", "plugins"]).splitlines() + ] + if b"heroku-builds" not in plugins: + click.echo( + "Publishing to Heroku requires the heroku-builds plugin to be installed." + ) + click.confirm( + "Install it? (this will run `heroku plugins:install heroku-builds`)", + abort=True, + ) + call(["heroku", "plugins:install", "heroku-builds"]) + + with temporary_heroku_directory( + files, + name, + metadata, + extra_options, + branch, + template_dir, + plugins_dir, + static, + install, + version_note, + { + "title": title, + "license": license, + "license_url": license_url, + "source": source, + "source_url": source_url, + }, + ): + app_name = None + if name: + # Check to see if this app already exists + list_output = check_output(["heroku", "apps:list", "--json"]).decode( + "utf8" + ) + apps = json.loads(list_output) + + for app in apps: + if app["name"] == name: + app_name = name + break + + if not app_name: + # Create a new app + cmd = ["heroku", "apps:create"] + if name: + cmd.append(name) + cmd.append("--json") + create_output = check_output(cmd).decode("utf8") + app_name = json.loads(create_output)["name"] + + call(["heroku", "builds:create", "-a", app_name]) diff --git a/datasette/publish/now.py b/datasette/publish/now.py new file mode 100644 index 00000000..fd081111 --- /dev/null +++ b/datasette/publish/now.py @@ -0,0 +1,80 @@ +from datasette import hookimpl +import click +from subprocess import call + +from .common import ( + add_common_publish_arguments_and_options, + fail_if_publish_binary_not_installed, +) +from ..utils import temporary_docker_directory + + +@hookimpl +def publish_subcommand(publish): + @publish.command() + @add_common_publish_arguments_and_options + @click.option( + "-n", + "--name", + default="datasette", + help="Application name to use when deploying", + ) + @click.option("--force", is_flag=True, help="Pass --force option to now") + @click.option("--token", 
help="Auth token to use for deploy (Now only)") + @click.option("--spatialite", is_flag=True, help="Enable SpatialLite extension") + def now( + files, + metadata, + extra_options, + branch, + template_dir, + plugins_dir, + static, + install, + version_note, + title, + license, + license_url, + source, + source_url, + name, + force, + token, + spatialite, + ): + fail_if_publish_binary_not_installed("now", "Zeit Now", "https://zeit.co/now") + if extra_options: + extra_options += " " + else: + extra_options = "" + extra_options += "--config force_https_urls:on" + + with temporary_docker_directory( + files, + name, + metadata, + extra_options, + branch, + template_dir, + plugins_dir, + static, + install, + spatialite, + version_note, + { + "title": title, + "license": license, + "license_url": license_url, + "source": source, + "source_url": source_url, + }, + ): + args = [] + if force: + args.append("--force") + if token: + args.append("--token={}".format(token)) + if args: + call(["now"] + args) + else: + call("now") diff --git a/datasette/utils.py b/datasette/utils.py index 7419f9ae..f95d0695 100644 --- a/datasette/utils.py +++ b/datasette/utils.py @@ -1,6 +1,7 @@ from contextlib import contextmanager from collections import OrderedDict import base64 +import click import hashlib import imp import json @@ -376,6 +377,7 @@ def temporary_heroku_directory( plugins_dir, static, install, + version_note, extra_metadata=None ): # FIXME: lots of duplicated code from above @@ -430,7 +432,8 @@ def temporary_heroku_directory( os.path.join(tmp.name, 'plugins') ) extras.extend(['--plugins-dir', 'plugins/']) - + if version_note: + extras.extend(['--version-note', version_note]) if metadata: extras.extend(['--metadata', 'metadata.json']) if extra_options: @@ -876,3 +879,18 @@ def remove_infinites(row): for c in row ] return row + + +class StaticMount(click.ParamType): + name = "static mount" + + def convert(self, value, param, ctx): + if ":" not in value: + self.fail( + '"{}" 
should be of format mountpoint:directory'.format(value), + param, ctx + ) + path, dirpath = value.split(":") + if not os.path.exists(dirpath) or not os.path.isdir(dirpath): + self.fail("%s is not a valid directory path" % value, param, ctx) + return path, dirpath diff --git a/docs/datasette-publish-heroku-help.txt b/docs/datasette-publish-heroku-help.txt new file mode 100644 index 00000000..f82eaf3e --- /dev/null +++ b/docs/datasette-publish-heroku-help.txt @@ -0,0 +1,20 @@ +$ datasette publish heroku --help + +Usage: datasette publish heroku [OPTIONS] [FILES]... + +Options: + -m, --metadata FILENAME Path to JSON file containing metadata to publish + --extra-options TEXT Extra options to pass to datasette serve + --branch TEXT Install datasette from a GitHub branch e.g. master + --template-dir DIRECTORY Path to directory containing custom templates + --plugins-dir DIRECTORY Path to directory containing custom plugins + --static STATIC MOUNT mountpoint:path-to-directory for serving static files + --install TEXT Additional packages (e.g. plugins) to install + --version-note TEXT Additional note to show on /-/versions + --title TEXT Title for metadata + --license TEXT License label for metadata + --license_url TEXT License URL for metadata + --source TEXT Source label for metadata + --source_url TEXT Source URL for metadata + -n, --name TEXT Application name to use when deploying + --help Show this message and exit. diff --git a/docs/datasette-publish-help.txt b/docs/datasette-publish-now-help.txt similarity index 74% rename from docs/datasette-publish-help.txt rename to docs/datasette-publish-now-help.txt index 04cda361..ce09030f 100644 --- a/docs/datasette-publish-help.txt +++ b/docs/datasette-publish-now-help.txt @@ -1,31 +1,23 @@ -$ datasette publish --help +$ datasette publish now --help -Usage: datasette publish [OPTIONS] PUBLISHER [FILES]... - - Publish specified SQLite database files to the internet along with a datasette API. 
- - Options for PUBLISHER: * 'now' - You must have Zeit Now installed: - https://zeit.co/now * 'heroku' - You must have Heroku installed: - https://cli.heroku.com/ - - Example usage: datasette publish now my-database.db +Usage: datasette publish now [OPTIONS] [FILES]... Options: - -n, --name TEXT Application name to use when deploying -m, --metadata FILENAME Path to JSON file containing metadata to publish --extra-options TEXT Extra options to pass to datasette serve - --force Pass --force option to now --branch TEXT Install datasette from a GitHub branch e.g. master - --token TEXT Auth token to use for deploy (Now only) --template-dir DIRECTORY Path to directory containing custom templates --plugins-dir DIRECTORY Path to directory containing custom plugins --static STATIC MOUNT mountpoint:path-to-directory for serving static files --install TEXT Additional packages (e.g. plugins) to install - --spatialite Enable SpatialLite extension --version-note TEXT Additional note to show on /-/versions --title TEXT Title for metadata --license TEXT License label for metadata --license_url TEXT License URL for metadata --source TEXT Source label for metadata --source_url TEXT Source URL for metadata + -n, --name TEXT Application name to use when deploying + --force Pass --force option to now + --token TEXT Auth token to use for deploy (Now only) + --spatialite Enable SpatialLite extension --help Show this message and exit. diff --git a/docs/plugins.rst b/docs/plugins.rst index 24635191..fc351bf6 100644 --- a/docs/plugins.rst +++ b/docs/plugins.rst @@ -258,3 +258,12 @@ you have one: return [ '/-/static-plugins/your_plugin/app.js' ] + +publish_subcommand(publish) +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This hook allows you to create new providers for the ``datasette publish`` +command. 
Datasette uses this hook internally to implement the default ``now`` +and ``heroku`` subcommands, so you can read +`their source <https://github.com/simonw/datasette/tree/master/datasette/publish>`_ +to see examples of this hook in action. diff --git a/docs/publish.rst b/docs/publish.rst index 1abe8881..8350afe0 100644 --- a/docs/publish.rst +++ b/docs/publish.rst @@ -36,6 +36,8 @@ You can use ``anything-you-like.now.sh``, provided no one else has already regis You can also use custom domains, if you `first register them with Zeit Now <https://zeit.co/docs/features/aliases>`_. +.. literalinclude:: datasette-publish-now-help.txt + Publishing to Heroku -------------------- @@ -51,6 +53,8 @@ This will output some details about the new deployment, including a URL like thi You can specify a custom app name by passing ``-n my-app-name`` to the publish command. This will also allow you to overwrite an existing app. +.. literalinclude:: datasette-publish-heroku-help.txt + Custom metadata and plugins --------------------------- @@ -71,9 +75,6 @@ You can also specify plugins you would like to install. For example, if you want datasette publish now mydatabase.db --install=datasette-vega -A full list of options can be seen by running ``datasette publish --help``: - -.. 
literalinclude:: datasette-publish-help.txt datasette package ================= diff --git a/update-docs-help.py b/update-docs-help.py index 8f6e8956..ea311c57 100644 --- a/update-docs-help.py +++ b/update-docs-help.py @@ -7,14 +7,17 @@ docs_path = Path(__file__).parent / "docs" includes = ( ("serve", "datasette-serve-help.txt"), ("package", "datasette-package-help.txt"), - ("publish", "datasette-publish-help.txt"), + ("publish now", "datasette-publish-now-help.txt"), + ("publish heroku", "datasette-publish-heroku-help.txt"), ) def update_help_includes(): for name, filename in includes: runner = CliRunner() - result = runner.invoke(cli, [name, "--help"], terminal_width=88) + result = runner.invoke( + cli, name.split() + ["--help"], terminal_width=88 + ) actual = "$ datasette {} --help\n\n{}".format( name, result.output )
Plugin support for datasette publish It should be possible to support additional deployment options by writing a plugin (see #59). As part of this, rewrite the Heroku and Now publishers to be implemented as plugins (they will still ship with datasette by default). Maybe `datasette package` should be changed to being part of publish instead, `datasette publish docker` perhaps? Refs #14
simonw/datasette
diff --git a/tests/test_docs.py b/tests/test_docs.py index ffbb7ca1..b8581e17 100644 --- a/tests/test_docs.py +++ b/tests/test_docs.py @@ -22,21 +22,22 @@ def test_config_options_are_documented(config): assert config.name in get_headings("config.rst") [email protected]('name,filename', ( - ('serve', 'datasette-serve-help.txt'), - ('package', 'datasette-package-help.txt'), - ('publish', 'datasette-publish-help.txt'), [email protected]("name,filename", ( + ("serve", "datasette-serve-help.txt"), + ("package", "datasette-package-help.txt"), + ("publish now", "datasette-publish-now-help.txt"), + ("publish heroku", "datasette-publish-heroku-help.txt"), )) def test_help_includes(name, filename): expected = open(str(docs_path / filename)).read() runner = CliRunner() - result = runner.invoke(cli, [name, '--help'], terminal_width=88) - actual = '$ datasette {} --help\n\n{}'.format( + result = runner.invoke(cli, name.split() + ["--help"], terminal_width=88) + actual = "$ datasette {} --help\n\n{}".format( name, result.output ) # actual has "Usage: cli package [OPTIONS] FILES" # because it doesn't know that cli will be aliased to datasette - expected = expected.replace('Usage: datasette', 'Usage: cli') + expected = expected.replace("Usage: datasette", "Usage: cli") assert expected == actual
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_issue_reference", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 8 }
0.24
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "aiohttp" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiofiles==0.8.0 aiohttp==3.8.6 aiosignal==1.2.0 async-timeout==4.0.2 asynctest==0.13.0 attrs==22.2.0 certifi==2021.5.30 charset-normalizer==3.0.1 click==6.7 click-default-group==1.2 -e git+https://github.com/simonw/datasette.git@3ac21c749881d0fb1c35b0f9b7a819e29f61c5c1#egg=datasette frozenlist==1.2.0 httptools==0.6.0 hupper==1.0 idna==3.10 idna-ssl==1.1.0 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==2.10 MarkupSafe==2.0.1 multidict==5.2.0 packaging==21.3 Pint==0.8.1 pluggy==0.13.1 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 sanic==0.7.0 tomli==1.2.3 typing_extensions==4.1.1 ujson==4.3.0 uvloop==0.14.0 websockets==9.1 yarl==1.7.2 zipp==3.6.0
name: datasette channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiofiles==0.8.0 - aiohttp==3.8.6 - aiosignal==1.2.0 - async-timeout==4.0.2 - asynctest==0.13.0 - attrs==22.2.0 - charset-normalizer==3.0.1 - click==6.7 - click-default-group==1.2 - frozenlist==1.2.0 - httptools==0.6.0 - hupper==1.0 - idna==3.10 - idna-ssl==1.1.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==2.10 - markupsafe==2.0.1 - multidict==5.2.0 - packaging==21.3 - pint==0.8.1 - pluggy==0.13.1 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - sanic==0.7.0 - tomli==1.2.3 - typing-extensions==4.1.1 - ujson==4.3.0 - uvloop==0.14.0 - websockets==9.1 - yarl==1.7.2 - zipp==3.6.0 prefix: /opt/conda/envs/datasette
[ "tests/test_docs.py::test_help_includes[publish", "tests/test_docs.py::test_plugin_hooks_are_documented[publish_subcommand]" ]
[]
[ "tests/test_docs.py::test_config_options_are_documented[config0]", "tests/test_docs.py::test_config_options_are_documented[config1]", "tests/test_docs.py::test_config_options_are_documented[config2]", "tests/test_docs.py::test_config_options_are_documented[config3]", "tests/test_docs.py::test_config_options_are_documented[config4]", "tests/test_docs.py::test_config_options_are_documented[config5]", "tests/test_docs.py::test_config_options_are_documented[config6]", "tests/test_docs.py::test_config_options_are_documented[config7]", "tests/test_docs.py::test_config_options_are_documented[config8]", "tests/test_docs.py::test_config_options_are_documented[config9]", "tests/test_docs.py::test_config_options_are_documented[config10]", "tests/test_docs.py::test_config_options_are_documented[config11]", "tests/test_docs.py::test_config_options_are_documented[config12]", "tests/test_docs.py::test_config_options_are_documented[config13]", "tests/test_docs.py::test_config_options_are_documented[config14]", "tests/test_docs.py::test_config_options_are_documented[config15]", "tests/test_docs.py::test_config_options_are_documented[config16]", "tests/test_docs.py::test_help_includes[serve-datasette-serve-help.txt]", "tests/test_docs.py::test_help_includes[package-datasette-package-help.txt]", "tests/test_docs.py::test_plugin_hooks_are_documented[extra_css_urls]", "tests/test_docs.py::test_plugin_hooks_are_documented[extra_js_urls]", "tests/test_docs.py::test_plugin_hooks_are_documented[prepare_connection]", "tests/test_docs.py::test_plugin_hooks_are_documented[prepare_jinja2_environment]" ]
[]
Apache License 2.0
2,829
[ "docs/publish.rst", "datasette/publish/heroku.py", "datasette/app.py", "update-docs-help.py", "datasette/publish/now.py", "datasette/cli.py", "datasette/utils.py", "datasette/publish/__init__.py", "docs/datasette-publish-help.txt", "docs/plugins.rst", "datasette/publish/common.py", "docs/datasette-publish-heroku-help.txt", "datasette/hookspecs.py" ]
[ "docs/publish.rst", "docs/datasette-publish-now-help.txt", "datasette/publish/heroku.py", "datasette/app.py", "update-docs-help.py", "datasette/publish/now.py", "datasette/cli.py", "datasette/utils.py", "datasette/publish/__init__.py", "docs/plugins.rst", "datasette/publish/common.py", "docs/datasette-publish-heroku-help.txt", "datasette/hookspecs.py" ]
encode__starlette-33
590db3d6ea6c7f3b03013209822cda2c54dc38ae
2018-07-26 13:38:33
590db3d6ea6c7f3b03013209822cda2c54dc38ae
simonw: Feel free to squash-merge this so it doesn't go in as three separate commits.
diff --git a/starlette/response.py b/starlette/response.py index db5bac8..f189ef3 100644 --- a/starlette/response.py +++ b/starlette/response.py @@ -177,5 +177,9 @@ class FileResponse(Response): chunk = await file.read(self.chunk_size) more_body = len(chunk) == self.chunk_size await send( - {"type": "http.response.body", "body": chunk, "more_body": False} + { + "type": "http.response.body", + "body": chunk, + "more_body": more_body, + } )
Error serving static files larger than 4096 bytes Static files larger than 4096 bytes do not appear to be served correctly. Here's a test I just wrote that illustrates the problem: https://github.com/simonw/starlette/commit/e2d6665fa5c32e77a3fe22836b14620a7f5999bb Running that test gives me the following output: ``` (venv) starlette $ PYTHONPATH=. pytest -k test_large_staticfile ===================================================== test session starts ====================================================== platform darwin -- Python 3.6.5, pytest-3.6.1, py-1.5.3, pluggy-0.6.0 rootdir: /Users/simonw/Dropbox/Development/starlette, inifile: collected 43 items / 42 deselected tests/test_staticfiles.py F [100%] =========================================================== FAILURES =========================================================== ____________________________________________________ test_large_staticfile _____________________________________________________ tmpdir = local('/private/var/folders/jj/fngnv0810tn2lt_kd3911pdc0000gp/T/pytest-of-simonw/pytest-8/test_large_staticfile0') def test_large_staticfile(tmpdir): path = os.path.join(tmpdir, "example.txt") content = "this is a lot of content" * 200 print("content len = ", len(content)) with open(path, "w") as file: file.write(content) app = StaticFile(path=path) client = TestClient(app) response = client.get("/") assert response.status_code == 200 > assert len(content) == len(response.text) E AssertionError: assert 4800 == 4096 E + where 4800 = len('this is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of cont...is is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of content') E + and 4096 = len(' contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot...ontentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of 
contentthis is a lot of') E + where ' contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot...ontentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of' = <Response [200]>.text tests/test_staticfiles.py:30: AssertionError ```
encode/starlette
diff --git a/tests/test_response.py b/tests/test_response.py index 670aa6c..bbfde25 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -70,8 +70,9 @@ def test_response_headers(): def test_file_response(tmpdir): path = os.path.join(tmpdir, "xyz") + content = b"<file content>" * 1000 with open(path, "wb") as file: - file.write(b"<file content>") + file.write(content) def app(scope): return FileResponse(path=path, filename="example.png") @@ -80,7 +81,7 @@ def test_file_response(tmpdir): response = client.get("/") expected_disposition = 'attachment; filename="example.png"' assert response.status_code == 200 - assert response.content == b"<file content>" + assert response.content == content assert response.headers["content-type"] == "image/png" assert response.headers["content-disposition"] == expected_disposition assert "content-length" in response.headers diff --git a/tests/test_staticfiles.py b/tests/test_staticfiles.py index ac5a6ae..d615ba6 100644 --- a/tests/test_staticfiles.py +++ b/tests/test_staticfiles.py @@ -16,6 +16,21 @@ def test_staticfile(tmpdir): assert response.text == "<file content>" +def test_large_staticfile(tmpdir): + path = os.path.join(tmpdir, "example.txt") + content = "this is a lot of content" * 200 + print("content len = ", len(content)) + with open(path, "w") as file: + file.write(content) + + app = StaticFile(path=path) + client = TestClient(app) + response = client.get("/") + assert response.status_code == 200 + assert len(content) == len(response.text) + assert content == response.text + + def test_staticfile_post(tmpdir): path = os.path.join(tmpdir, "example.txt") with open(path, "w") as file:
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "black", "twine" ], "pre_install": [], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aiofiles==24.1.0 backports.tarfile==1.2.0 black==25.1.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 click==8.1.8 codecov==2.1.13 coverage==7.8.0 cryptography==44.0.2 docutils==0.21.2 exceptiongroup==1.2.2 id==1.5.0 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 keyring==25.6.0 markdown-it-py==3.0.0 mdurl==0.1.2 more-itertools==10.6.0 mypy-extensions==1.0.0 nh3==0.2.21 packaging==24.2 pathspec==0.12.1 platformdirs==4.3.7 pluggy==1.5.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 pytest-cov==6.0.0 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 -e git+https://github.com/encode/starlette.git@590db3d6ea6c7f3b03013209822cda2c54dc38ae#egg=starlette tomli==2.2.1 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 zipp==3.21.0
name: starlette channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - aiofiles==24.1.0 - backports-tarfile==1.2.0 - black==25.1.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - click==8.1.8 - codecov==2.1.13 - coverage==7.8.0 - cryptography==44.0.2 - docutils==0.21.2 - exceptiongroup==1.2.2 - id==1.5.0 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - keyring==25.6.0 - markdown-it-py==3.0.0 - mdurl==0.1.2 - more-itertools==10.6.0 - mypy-extensions==1.0.0 - nh3==0.2.21 - packaging==24.2 - pathspec==0.12.1 - platformdirs==4.3.7 - pluggy==1.5.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - pytest-cov==6.0.0 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - tomli==2.2.1 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - zipp==3.21.0 prefix: /opt/conda/envs/starlette
[ "tests/test_response.py::test_file_response", "tests/test_staticfiles.py::test_large_staticfile" ]
[]
[ "tests/test_response.py::test_text_response", "tests/test_response.py::test_bytes_response", "tests/test_response.py::test_streaming_response", "tests/test_response.py::test_response_headers", "tests/test_staticfiles.py::test_staticfile", "tests/test_staticfiles.py::test_staticfile_post", "tests/test_staticfiles.py::test_staticfile_with_directory_raises_error", "tests/test_staticfiles.py::test_staticfile_with_missing_file_raises_error", "tests/test_staticfiles.py::test_staticfiles", "tests/test_staticfiles.py::test_staticfiles_post", "tests/test_staticfiles.py::test_staticfiles_with_directory_returns_404", "tests/test_staticfiles.py::test_staticfiles_with_missing_file_returns_404", "tests/test_staticfiles.py::test_staticfiles_configured_with_missing_directory", "tests/test_staticfiles.py::test_staticfiles_configured_with_file_instead_of_directory", "tests/test_staticfiles.py::test_staticfiles_config_check_occurs_only_once", "tests/test_staticfiles.py::test_staticfiles_prevents_breaking_out_of_directory" ]
[]
BSD 3-Clause "New" or "Revised" License
2,830
[ "starlette/response.py" ]
[ "starlette/response.py" ]
encode__uvicorn-165
5e2da780ae775dbe963066554d3bb230a41c7f9c
2018-07-26 14:15:12
173e8f6278160a5acc78b8aa48858e1cc3b81ae3
diff --git a/uvicorn/protocols/http/h11_impl.py b/uvicorn/protocols/http/h11_impl.py index ab9cdaf..598b5e0 100644 --- a/uvicorn/protocols/http/h11_impl.py +++ b/uvicorn/protocols/http/h11_impl.py @@ -465,7 +465,10 @@ class RequestResponseCycle: more_body = message.get("more_body", False) # Write response body - event = h11.Data(data=body) + if self.scope['method'] == "HEAD": + event = h11.Data() + else: + event = h11.Data(data=body) output = self.conn.send(event) self.transport.write(output) diff --git a/uvicorn/protocols/http/httptools_impl.py b/uvicorn/protocols/http/httptools_impl.py index cfd7887..8a593fc 100644 --- a/uvicorn/protocols/http/httptools_impl.py +++ b/uvicorn/protocols/http/httptools_impl.py @@ -467,7 +467,9 @@ class RequestResponseCycle: more_body = message.get("more_body", False) # Write response body - if self.chunked_encoding: + if self.scope['method'] == "HEAD": + self.expected_content_length = 0 + elif self.chunked_encoding: content = [b"%x\r\n" % len(body), body, b"\r\n"] if not more_body: content.append(b"0\r\n\r\n")
Drop response body on HEAD requests
encode/uvicorn
diff --git a/tests/protocols/test_http.py b/tests/protocols/test_http.py index dd4b62c..4ec0a9b 100644 --- a/tests/protocols/test_http.py +++ b/tests/protocols/test_http.py @@ -48,6 +48,8 @@ class Response: SIMPLE_GET_REQUEST = b"\r\n".join([b"GET / HTTP/1.1", b"Host: example.org", b"", b""]) +SIMPLE_HEAD_REQUEST = b"\r\n".join([b"HEAD / HTTP/1.1", b"Host: example.org", b"", b""]) + SIMPLE_POST_REQUEST = b"\r\n".join( [ b"POST / HTTP/1.1", @@ -175,6 +177,18 @@ def test_get_request(protocol_cls): assert b"Hello, world" in protocol.transport.buffer [email protected]("protocol_cls", [HttpToolsProtocol, H11Protocol]) +def test_head_request(protocol_cls): + def app(scope): + return Response("Hello, world", media_type="text/plain") + + protocol = get_connected_protocol(app, protocol_cls) + protocol.data_received(SIMPLE_HEAD_REQUEST) + protocol.loop.run_one() + assert b"HTTP/1.1 200 OK" in protocol.transport.buffer + assert b"Hello, world" not in protocol.transport.buffer + + @pytest.mark.parametrize("protocol_cls", [HttpToolsProtocol, H11Protocol]) def test_post_request(protocol_cls): class App:
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 2 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi charset-normalizer==3.4.1 click==8.1.8 codecov==2.1.13 coverage==7.2.7 exceptiongroup==1.2.2 h11==0.14.0 httptools==0.6.0 idna==3.10 importlib-metadata==6.7.0 iniconfig==2.0.0 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 pytest-cov==4.1.0 requests==2.31.0 tomli==2.0.1 typing_extensions==4.7.1 urllib3==2.0.7 -e git+https://github.com/encode/uvicorn.git@5e2da780ae775dbe963066554d3bb230a41c7f9c#egg=uvicorn uvloop==0.18.0 websockets==11.0.3 zipp==3.15.0
name: uvicorn channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==3.4.1 - click==8.1.8 - codecov==2.1.13 - coverage==7.2.7 - exceptiongroup==1.2.2 - h11==0.14.0 - httptools==0.6.0 - idna==3.10 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-cov==4.1.0 - requests==2.31.0 - tomli==2.0.1 - typing-extensions==4.7.1 - urllib3==2.0.7 - uvloop==0.18.0 - websockets==11.0.3 - zipp==3.15.0 prefix: /opt/conda/envs/uvicorn
[ "tests/protocols/test_http.py::test_head_request[HttpToolsProtocol]" ]
[]
[ "tests/protocols/test_http.py::test_get_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_get_request[H11Protocol]", "tests/protocols/test_http.py::test_head_request[H11Protocol]", "tests/protocols/test_http.py::test_post_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_post_request[H11Protocol]", "tests/protocols/test_http.py::test_keepalive[HttpToolsProtocol]", "tests/protocols/test_http.py::test_keepalive[H11Protocol]", "tests/protocols/test_http.py::test_keepalive_timeout[HttpToolsProtocol]", "tests/protocols/test_http.py::test_keepalive_timeout[H11Protocol]", "tests/protocols/test_http.py::test_close[HttpToolsProtocol]", "tests/protocols/test_http.py::test_close[H11Protocol]", "tests/protocols/test_http.py::test_chunked_encoding[HttpToolsProtocol]", "tests/protocols/test_http.py::test_chunked_encoding[H11Protocol]", "tests/protocols/test_http.py::test_pipelined_requests[HttpToolsProtocol]", "tests/protocols/test_http.py::test_pipelined_requests[H11Protocol]", "tests/protocols/test_http.py::test_undersized_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_undersized_request[H11Protocol]", "tests/protocols/test_http.py::test_oversized_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_oversized_request[H11Protocol]", "tests/protocols/test_http.py::test_large_post_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_large_post_request[H11Protocol]", "tests/protocols/test_http.py::test_invalid_http[HttpToolsProtocol]", "tests/protocols/test_http.py::test_invalid_http[H11Protocol]", "tests/protocols/test_http.py::test_app_exception[HttpToolsProtocol]", "tests/protocols/test_http.py::test_app_exception[H11Protocol]", "tests/protocols/test_http.py::test_app_init_exception[HttpToolsProtocol]", "tests/protocols/test_http.py::test_app_init_exception[H11Protocol]", "tests/protocols/test_http.py::test_exception_during_response[HttpToolsProtocol]", 
"tests/protocols/test_http.py::test_exception_during_response[H11Protocol]", "tests/protocols/test_http.py::test_no_response_returned[HttpToolsProtocol]", "tests/protocols/test_http.py::test_no_response_returned[H11Protocol]", "tests/protocols/test_http.py::test_partial_response_returned[HttpToolsProtocol]", "tests/protocols/test_http.py::test_partial_response_returned[H11Protocol]", "tests/protocols/test_http.py::test_duplicate_start_message[HttpToolsProtocol]", "tests/protocols/test_http.py::test_duplicate_start_message[H11Protocol]", "tests/protocols/test_http.py::test_missing_start_message[HttpToolsProtocol]", "tests/protocols/test_http.py::test_missing_start_message[H11Protocol]", "tests/protocols/test_http.py::test_message_after_body_complete[HttpToolsProtocol]", "tests/protocols/test_http.py::test_message_after_body_complete[H11Protocol]", "tests/protocols/test_http.py::test_value_returned[HttpToolsProtocol]", "tests/protocols/test_http.py::test_value_returned[H11Protocol]", "tests/protocols/test_http.py::test_early_disconnect[HttpToolsProtocol]", "tests/protocols/test_http.py::test_early_disconnect[H11Protocol]", "tests/protocols/test_http.py::test_early_response[HttpToolsProtocol]", "tests/protocols/test_http.py::test_early_response[H11Protocol]", "tests/protocols/test_http.py::test_read_after_response[HttpToolsProtocol]", "tests/protocols/test_http.py::test_read_after_response[H11Protocol]", "tests/protocols/test_http.py::test_http10_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_http10_request[H11Protocol]", "tests/protocols/test_http.py::test_root_path[HttpToolsProtocol]", "tests/protocols/test_http.py::test_root_path[H11Protocol]", "tests/protocols/test_http.py::test_proxy_headers[HttpToolsProtocol]", "tests/protocols/test_http.py::test_proxy_headers[H11Protocol]", "tests/protocols/test_http.py::test_max_concurrency[HttpToolsProtocol]", "tests/protocols/test_http.py::test_max_concurrency[H11Protocol]", 
"tests/protocols/test_http.py::test_shutdown_during_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_shutdown_during_request[H11Protocol]", "tests/protocols/test_http.py::test_shutdown_during_idle[HttpToolsProtocol]", "tests/protocols/test_http.py::test_shutdown_during_idle[H11Protocol]" ]
[]
BSD 3-Clause "New" or "Revised" License
2,831
[ "uvicorn/protocols/http/httptools_impl.py", "uvicorn/protocols/http/h11_impl.py" ]
[ "uvicorn/protocols/http/httptools_impl.py", "uvicorn/protocols/http/h11_impl.py" ]
encode__uvicorn-166
fa5185f348aa3be814f986abdec967d9de4a57f3
2018-07-26 14:49:29
173e8f6278160a5acc78b8aa48858e1cc3b81ae3
diff --git a/uvicorn/protocols/http/h11_impl.py b/uvicorn/protocols/http/h11_impl.py index 598b5e0..23c021d 100644 --- a/uvicorn/protocols/http/h11_impl.py +++ b/uvicorn/protocols/http/h11_impl.py @@ -361,6 +361,7 @@ class RequestResponseCycle: # Connection state self.disconnected = False self.keep_alive = True + self.waiting_for_100_continue = conn.they_are_waiting_for_100_continue # Request state self.body = b"" @@ -433,6 +434,7 @@ class RequestResponseCycle: raise RuntimeError(msg % message_type) self.response_started = True + self.waiting_for_100_continue = False status_code = message["status"] headers = DEFAULT_HEADERS + message.get("headers", []) @@ -465,7 +467,7 @@ class RequestResponseCycle: more_body = message.get("more_body", False) # Write response body - if self.scope['method'] == "HEAD": + if self.scope["method"] == "HEAD": event = h11.Data() else: event = h11.Data(data=body) @@ -492,6 +494,14 @@ class RequestResponseCycle: self.on_response() async def receive(self): + if self.waiting_for_100_continue and not self.transport.is_closing(): + event = h11.InformationalResponse( + status_code=100, headers=[], reason="Continue" + ) + output = self.conn.send(event) + self.transport.write(output) + self.waiting_for_100_continue = False + self.flow.resume_reading() await self.message_event.wait() self.message_event.clear() diff --git a/uvicorn/protocols/http/httptools_impl.py b/uvicorn/protocols/http/httptools_impl.py index 8a593fc..cc40fa6 100644 --- a/uvicorn/protocols/http/httptools_impl.py +++ b/uvicorn/protocols/http/httptools_impl.py @@ -144,6 +144,7 @@ class HttpToolsProtocol(asyncio.Protocol): # Per-request state self.scope = None self.headers = None + self.expect_100_continue = False self.cycle = None self.message_event = asyncio.Event() @@ -196,6 +197,7 @@ class HttpToolsProtocol(asyncio.Protocol): def on_url(self, url): method = self.parser.get_method() parsed_url = httptools.parse_url(url) + self.expect_100_continue = False self.headers = [] 
self.scope = { "type": "http", @@ -211,7 +213,10 @@ class HttpToolsProtocol(asyncio.Protocol): } def on_header(self, name: bytes, value: bytes): - self.headers.append((name.lower(), value)) + name = name.lower() + if name == b"expect" and value.lower() == b"100-continue": + self.expect_100_continue = True + self.headers.append((name, value)) def on_headers_complete(self): http_version = self.parser.get_http_version() @@ -242,6 +247,7 @@ class HttpToolsProtocol(asyncio.Protocol): flow=self.flow, logger=self.logger, message_event=self.message_event, + expect_100_continue=self.expect_100_continue, on_response=self.on_response_complete, ) if existing_cycle is None or existing_cycle.response_complete: @@ -337,7 +343,16 @@ class HttpToolsProtocol(asyncio.Protocol): class RequestResponseCycle: - def __init__(self, scope, transport, flow, logger, message_event, on_response): + def __init__( + self, + scope, + transport, + flow, + logger, + message_event, + expect_100_continue, + on_response, + ): self.scope = scope self.transport = transport self.flow = flow @@ -348,6 +363,7 @@ class RequestResponseCycle: # Connection state self.disconnected = False self.keep_alive = True + self.waiting_for_100_continue = expect_100_continue # Request state self.body = b"" @@ -420,6 +436,7 @@ class RequestResponseCycle: raise RuntimeError(msg % message_type) self.response_started = True + self.waiting_for_100_continue = False status_code = message["status"] headers = message.get("headers", []) @@ -467,7 +484,7 @@ class RequestResponseCycle: more_body = message.get("more_body", False) # Write response body - if self.scope['method'] == "HEAD": + if self.scope["method"] == "HEAD": self.expected_content_length = 0 elif self.chunked_encoding: content = [b"%x\r\n" % len(body), body, b"\r\n"] @@ -497,6 +514,10 @@ class RequestResponseCycle: raise RuntimeError(msg % message_type) async def receive(self): + if self.waiting_for_100_continue and not self.transport.is_closing(): + 
self.transport.write(b"HTTP/1.1 100 Continue\r\n") + self.waiting_for_100_continue = False + self.flow.resume_reading() await self.message_event.wait() self.message_event.clear()
Support `Expect: 100-Continue` * Deal with `Expect: 100-Continue` headers gracefully, by only sending if the request body is read prior to sending the response headers.
encode/uvicorn
diff --git a/tests/protocols/test_http.py b/tests/protocols/test_http.py index 4ec0a9b..0a4f1d6 100644 --- a/tests/protocols/test_http.py +++ b/tests/protocols/test_http.py @@ -625,3 +625,61 @@ def test_shutdown_during_idle(protocol_cls): protocol.shutdown() assert protocol.transport.buffer == b"" assert protocol.transport.is_closing() + + [email protected]("protocol_cls", [HttpToolsProtocol, H11Protocol]) +def test_100_continue_sent_when_body_consumed(protocol_cls): + class App: + def __init__(self, scope): + self.scope = scope + + async def __call__(self, receive, send): + body = b"" + more_body = True + while more_body: + message = await receive() + body += message.get("body", b"") + more_body = message.get("more_body", False) + response = Response(b"Body: " + body, media_type="text/plain") + await response(receive, send) + + protocol = get_connected_protocol(App, protocol_cls) + EXPECT_100_REQUEST = b"\r\n".join( + [ + b"POST / HTTP/1.1", + b"Host: example.org", + b"Expect: 100-continue", + b"Content-Type: application/json", + b"Content-Length: 18", + b"", + b'{"hello": "world"}', + ] + ) + protocol.data_received(EXPECT_100_REQUEST) + protocol.loop.run_one() + assert b"HTTP/1.1 100 Continue" in protocol.transport.buffer + assert b"HTTP/1.1 200 OK" in protocol.transport.buffer + assert b'Body: {"hello": "world"}' in protocol.transport.buffer + + [email protected]("protocol_cls", [HttpToolsProtocol, H11Protocol]) +def test_100_continue_not_sent_when_body_not_consumed(protocol_cls): + def app(scope): + return Response(b"", status_code=204) + + protocol = get_connected_protocol(app, protocol_cls) + EXPECT_100_REQUEST = b"\r\n".join( + [ + b"POST / HTTP/1.1", + b"Host: example.org", + b"Expect: 100-continue", + b"Content-Type: application/json", + b"Content-Length: 18", + b"", + b'{"hello": "world"}', + ] + ) + protocol.data_received(EXPECT_100_REQUEST) + protocol.loop.run_one() + assert b"HTTP/1.1 100 Continue" not in protocol.transport.buffer + assert b"HTTP/1.1 
204 No Content" in protocol.transport.buffer
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "requests", "codecov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 codecov==2.1.13 coverage==6.2 dataclasses==0.8 h11==0.13.0 httptools==0.6.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 requests==2.27.1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 -e git+https://github.com/encode/uvicorn.git@fa5185f348aa3be814f986abdec967d9de4a57f3#egg=uvicorn uvloop==0.14.0 websockets==9.1 zipp==3.6.0
name: uvicorn channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - click==8.0.4 - codecov==2.1.13 - coverage==6.2 - dataclasses==0.8 - h11==0.13.0 - httptools==0.6.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - requests==2.27.1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - uvloop==0.14.0 - websockets==9.1 - zipp==3.6.0 prefix: /opt/conda/envs/uvicorn
[ "tests/protocols/test_http.py::test_100_continue_sent_when_body_consumed[HttpToolsProtocol]", "tests/protocols/test_http.py::test_100_continue_sent_when_body_consumed[H11Protocol]" ]
[]
[ "tests/protocols/test_http.py::test_get_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_get_request[H11Protocol]", "tests/protocols/test_http.py::test_head_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_head_request[H11Protocol]", "tests/protocols/test_http.py::test_post_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_post_request[H11Protocol]", "tests/protocols/test_http.py::test_keepalive[HttpToolsProtocol]", "tests/protocols/test_http.py::test_keepalive[H11Protocol]", "tests/protocols/test_http.py::test_keepalive_timeout[HttpToolsProtocol]", "tests/protocols/test_http.py::test_keepalive_timeout[H11Protocol]", "tests/protocols/test_http.py::test_close[HttpToolsProtocol]", "tests/protocols/test_http.py::test_close[H11Protocol]", "tests/protocols/test_http.py::test_chunked_encoding[HttpToolsProtocol]", "tests/protocols/test_http.py::test_chunked_encoding[H11Protocol]", "tests/protocols/test_http.py::test_pipelined_requests[HttpToolsProtocol]", "tests/protocols/test_http.py::test_pipelined_requests[H11Protocol]", "tests/protocols/test_http.py::test_undersized_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_undersized_request[H11Protocol]", "tests/protocols/test_http.py::test_oversized_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_oversized_request[H11Protocol]", "tests/protocols/test_http.py::test_large_post_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_large_post_request[H11Protocol]", "tests/protocols/test_http.py::test_invalid_http[HttpToolsProtocol]", "tests/protocols/test_http.py::test_invalid_http[H11Protocol]", "tests/protocols/test_http.py::test_app_exception[HttpToolsProtocol]", "tests/protocols/test_http.py::test_app_exception[H11Protocol]", "tests/protocols/test_http.py::test_app_init_exception[HttpToolsProtocol]", "tests/protocols/test_http.py::test_app_init_exception[H11Protocol]", 
"tests/protocols/test_http.py::test_exception_during_response[HttpToolsProtocol]", "tests/protocols/test_http.py::test_exception_during_response[H11Protocol]", "tests/protocols/test_http.py::test_no_response_returned[HttpToolsProtocol]", "tests/protocols/test_http.py::test_no_response_returned[H11Protocol]", "tests/protocols/test_http.py::test_partial_response_returned[HttpToolsProtocol]", "tests/protocols/test_http.py::test_partial_response_returned[H11Protocol]", "tests/protocols/test_http.py::test_duplicate_start_message[HttpToolsProtocol]", "tests/protocols/test_http.py::test_duplicate_start_message[H11Protocol]", "tests/protocols/test_http.py::test_missing_start_message[HttpToolsProtocol]", "tests/protocols/test_http.py::test_missing_start_message[H11Protocol]", "tests/protocols/test_http.py::test_message_after_body_complete[HttpToolsProtocol]", "tests/protocols/test_http.py::test_message_after_body_complete[H11Protocol]", "tests/protocols/test_http.py::test_value_returned[HttpToolsProtocol]", "tests/protocols/test_http.py::test_value_returned[H11Protocol]", "tests/protocols/test_http.py::test_early_disconnect[HttpToolsProtocol]", "tests/protocols/test_http.py::test_early_disconnect[H11Protocol]", "tests/protocols/test_http.py::test_early_response[HttpToolsProtocol]", "tests/protocols/test_http.py::test_early_response[H11Protocol]", "tests/protocols/test_http.py::test_read_after_response[HttpToolsProtocol]", "tests/protocols/test_http.py::test_read_after_response[H11Protocol]", "tests/protocols/test_http.py::test_http10_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_http10_request[H11Protocol]", "tests/protocols/test_http.py::test_root_path[HttpToolsProtocol]", "tests/protocols/test_http.py::test_root_path[H11Protocol]", "tests/protocols/test_http.py::test_proxy_headers[HttpToolsProtocol]", "tests/protocols/test_http.py::test_proxy_headers[H11Protocol]", "tests/protocols/test_http.py::test_max_concurrency[HttpToolsProtocol]", 
"tests/protocols/test_http.py::test_max_concurrency[H11Protocol]", "tests/protocols/test_http.py::test_shutdown_during_request[HttpToolsProtocol]", "tests/protocols/test_http.py::test_shutdown_during_request[H11Protocol]", "tests/protocols/test_http.py::test_shutdown_during_idle[HttpToolsProtocol]", "tests/protocols/test_http.py::test_shutdown_during_idle[H11Protocol]", "tests/protocols/test_http.py::test_100_continue_not_sent_when_body_not_consumed[HttpToolsProtocol]", "tests/protocols/test_http.py::test_100_continue_not_sent_when_body_not_consumed[H11Protocol]" ]
[]
BSD 3-Clause "New" or "Revised" License
2,832
[ "uvicorn/protocols/http/httptools_impl.py", "uvicorn/protocols/http/h11_impl.py" ]
[ "uvicorn/protocols/http/httptools_impl.py", "uvicorn/protocols/http/h11_impl.py" ]
pydata__sparse-175
6cb64a5246cf07a59aca21f942602655557f5111
2018-07-26 19:12:32
b03b6b9a480a10a3cf59d7994292b9c5d3015cd5
hameerabbasi: Oh. Apparently you're using an old NumPy version... The docstrings need to be produced with newer versions to pass the tests, unfortunately. (1.14.0 or newer I think).
diff --git a/docs/generated/sparse.COO.all.rst b/docs/generated/sparse.COO.all.rst new file mode 100644 index 0000000..f79e99c --- /dev/null +++ b/docs/generated/sparse.COO.all.rst @@ -0,0 +1,6 @@ +COO.all +======= + +.. currentmodule:: sparse + +.. automethod:: COO.all \ No newline at end of file diff --git a/docs/generated/sparse.COO.any.rst b/docs/generated/sparse.COO.any.rst new file mode 100644 index 0000000..1bdad68 --- /dev/null +++ b/docs/generated/sparse.COO.any.rst @@ -0,0 +1,6 @@ +COO.any +======= + +.. currentmodule:: sparse + +.. automethod:: COO.any \ No newline at end of file diff --git a/docs/generated/sparse.COO.rst b/docs/generated/sparse.COO.rst index d0a1c9f..cdd16d4 100644 --- a/docs/generated/sparse.COO.rst +++ b/docs/generated/sparse.COO.rst @@ -40,10 +40,13 @@ COO :toctree: COO.reduce + COO.sum - COO.max - COO.min COO.prod + COO.min + COO.max + COO.any + COO.all .. rubric:: :ref:`Converting to other formats <converting>` .. autosummary:: diff --git a/docs/operations.rst b/docs/operations.rst index 9ca3183..524f2af 100644 --- a/docs/operations.rst +++ b/docs/operations.rst @@ -217,7 +217,21 @@ All of the following will raise an :obj:`IndexError`, like in Numpy 1.13 and lat z[1, 4, 8] z[-6] -.. note:: Numpy advanced indexing is currently not supported. + +Advanced Indexing +~~~~~~~~~~~~~~~~~ + +Advanced indexing (indexing arrays with other arrays) is supported, but only for indexing +with a *single array*. Indexing a single array with multiple arrays is not supported at +this time. As above, if :code:`z.shape` is :code:`(5, 6, 7)`, all of the following will +work like NumPy: + +.. code-block:: python + + z[[0, 1, 2]] + z[1, [3]] + z[1, 4, [3, 6]] + z[:3, :2, [1, 5]] .. 
_operations-other: diff --git a/requirements.txt b/requirements.txt index e0f833c..bc7f77c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ numpy >= 1.13 scipy >= 0.19 -numba +numba >= 0.39 diff --git a/sparse/coo/core.py b/sparse/coo/core.py index 1ec395b..881a721 100644 --- a/sparse/coo/core.py +++ b/sparse/coo/core.py @@ -811,6 +811,114 @@ class COO(SparseArray, NDArrayOperatorsMixin): """ return np.maximum.reduce(self, out=out, axis=axis, keepdims=keepdims) + def any(self, axis=None, keepdims=False, out=None): + """ + See if any values along array are ``True``. Uses all axes by default. + + Parameters + ---------- + axis : Union[int, Iterable[int]], optional + The axes along which to minimize. Uses all axes by default. + keepdims : bool, optional + Whether or not to keep the dimensions of the original array. + + Returns + ------- + COO + The reduced output sparse array. + + See Also + -------- + :obj:`numpy.all` : Equivalent numpy function. + + Notes + ----- + * This function internally calls :obj:`COO.sum_duplicates` to bring the array into + canonical form. + * The :code:`out` parameter is provided just for compatibility with Numpy and + isn't actually supported. + + Examples + -------- + You can use :obj:`COO.min` to minimize an array across any dimension. + + >>> x = np.array([[False, False], + ... [False, True ], + ... [True, False], + ... [True, True ]]) + >>> s = COO.from_numpy(x) + >>> s2 = s.any(axis=1) + >>> s2.todense() # doctest: +SKIP + array([False, True, True, True]) + + You can also use the :code:`keepdims` argument to keep the dimensions after the + minimization. + + >>> s3 = s.any(axis=1, keepdims=True) + >>> s3.shape + (4, 1) + + By default, this reduces the array down to one number, minimizing along all axes. + + >>> s.any() + True + """ + return np.logical_or.reduce(self, out=out, axis=axis, keepdims=keepdims) + + def all(self, axis=None, keepdims=False, out=None): + """ + See if all values in an array are ``True``. 
Uses all axes by default. + + Parameters + ---------- + axis : Union[int, Iterable[int]], optional + The axes along which to minimize. Uses all axes by default. + keepdims : bool, optional + Whether or not to keep the dimensions of the original array. + + Returns + ------- + COO + The reduced output sparse array. + + See Also + -------- + :obj:`numpy.all` : Equivalent numpy function. + + Notes + ----- + * This function internally calls :obj:`COO.sum_duplicates` to bring the array into + canonical form. + * The :code:`out` parameter is provided just for compatibility with Numpy and + isn't actually supported. + + Examples + -------- + You can use :obj:`COO.min` to minimize an array across any dimension. + + >>> x = np.array([[False, False], + ... [False, True ], + ... [True, False], + ... [True, True ]]) + >>> s = COO.from_numpy(x) + >>> s2 = s.all(axis=1) + >>> s2.todense() # doctest: +SKIP + array([False, False, False, True]) + + You can also use the :code:`keepdims` argument to keep the dimensions after the + minimization. + + >>> s3 = s.all(axis=1, keepdims=True) + >>> s3.shape + (4, 1) + + By default, this reduces the array down to one boolean, minimizing along all axes. + + >>> s.all() + False + """ + return np.logical_and.reduce(self, out=out, axis=axis, keepdims=keepdims) + def min(self, axis=None, keepdims=False, out=None): """ Minimize along the given axes. Uses all axes by default. @@ -865,7 +973,7 @@ class COO(SparseArray, NDArrayOperatorsMixin): >>> s3.shape (5, 1) - By default, this reduces the array down to one number, minimizing along all axes. + By default, this reduces the array down to one boolean, minimizing along all axes. 
>>> s.min() 0 diff --git a/sparse/coo/indexing.py b/sparse/coo/indexing.py index a3eaf0f..7ee9688 100644 --- a/sparse/coo/indexing.py +++ b/sparse/coo/indexing.py @@ -1,4 +1,3 @@ -from collections import Iterable from numbers import Integral import numba @@ -60,11 +59,12 @@ def getitem(x, index): index = normalize_index(index, x.shape) # zip_longest so things like x[..., None] are picked up. - if len(index) != 0 and all(ind == slice(0, dim, 1) for ind, dim in zip_longest(index, x.shape)): + if len(index) != 0 and all(isinstance(ind, slice) and ind == slice(0, dim, 1) + for ind, dim in zip_longest(index, x.shape)): return x # Get the mask - mask = _mask(x.coords, index, x.shape) + mask, adv_idx = _mask(x.coords, index, x.shape) # Get the length of the mask if isinstance(mask, slice): @@ -76,8 +76,7 @@ def getitem(x, index): shape = [] i = 0 - sorted = True - + sorted = adv_idx is None or adv_idx.pos == 0 for ind in index: # Nothing is added to shape or coords if the index is an integer. if isinstance(ind, Integral): @@ -88,11 +87,13 @@ def getitem(x, index): shape.append(len(range(ind.start, ind.stop, ind.step))) coords.append((x.coords[i, mask] - ind.start) // ind.step) i += 1 - if ind.step < 0: sorted = False - elif isinstance(ind, Iterable): - raise NotImplementedError('Advanced indexing is not yet supported.') + # Add the index and shape for the advanced index. + elif isinstance(ind, np.ndarray): + shape.append(adv_idx.length) + coords.append(adv_idx.idx) + i += 1 # Add a dimension for None. 
elif ind is None: coords.append(np.zeros(n, dtype=np.intp)) @@ -122,21 +123,61 @@ def getitem(x, index): def _mask(coords, indices, shape): indices = _prune_indices(indices, shape) + indices, adv_idx, adv_idx_pos = _separate_adv_indices(indices) + + if len(adv_idx) != 0: + if len(adv_idx) != 1: + raise IndexError('Only indices with at most one iterable index are supported.') + + adv_idx = adv_idx[0] + adv_idx_pos = adv_idx_pos[0] + + if adv_idx.ndim != 1: + raise IndexError('Only one-dimensional iterable indices supported.') + + mask, aidxs = _compute_multi_mask(coords, _ind_ar_from_indices(indices), adv_idx, adv_idx_pos) + return mask, _AdvIdxInfo(aidxs, adv_idx_pos, len(adv_idx)) + + mask, is_slice = _compute_mask(coords, _ind_ar_from_indices(indices)) + if is_slice: + return slice(mask[0], mask[1], 1), None + else: + return mask, None + + +def _ind_ar_from_indices(indices): + """ + Computes an index "array" from indices, such that ``indices[i]`` is + transformed to ``ind_ar[i]`` and ``ind_ar[i].shape == (3,)``. It has the + format ``[start, stop, step]``. Integers are converted into steps as well. + + Parameters + ---------- + indices : Iterable + Input indices (slices and integers) + + Returns + ------- + ind_ar : np.ndarray + The output array. 
+ + Examples + -------- + >>> _ind_ar_from_indices([1]) + array([[1, 2, 1]]) + >>> _ind_ar_from_indices([slice(5, 7, 2)]) + array([[5, 7, 2]]) + """ ind_ar = np.empty((len(indices), 3), dtype=np.intp) for i, idx in enumerate(indices): if isinstance(idx, slice): ind_ar[i] = [idx.start, idx.stop, idx.step] - else: # idx is an integer + elif isinstance(idx, Integral): ind_ar[i] = [idx, idx + 1, 1] - mask, is_slice = _compute_mask(coords, ind_ar) - - if is_slice: - return slice(mask[0], mask[1], 1) - else: - return mask + return ind_ar def _prune_indices(indices, shape, prune_none=True): @@ -185,6 +226,85 @@ def _prune_indices(indices, shape, prune_none=True): return indices +def _separate_adv_indices(indices): + """ + Separates advanced from normal indices. + + Parameters + ---------- + indices : list + The input indices + + Returns + ------- + new_idx : list + The normal indices. + adv_idx : list + The advanced indices. + adv_idx_pos : list + The positions of the advanced indices. + """ + adv_idx_pos = [] + new_idx = [] + adv_idx = [] + + for i, idx in enumerate(indices): + if isinstance(idx, np.ndarray): + adv_idx.append(idx) + adv_idx_pos.append(i) + else: + new_idx.append(idx) + + return new_idx, adv_idx, adv_idx_pos + + [email protected](nopython=True, nogil=True) +def _compute_multi_mask(coords, indices, adv_idx, adv_idx_pos): # pragma: no cover + """ + Computes a mask with the advanced index, and also returns the advanced index + dimension. + + Parameters + ---------- + coords : np.ndarray + Coordinates of the input array. + indices : np.ndarray + The indices in slice format. + adv_idx : int + The advanced index. + adv_idx_pos : int + The position of the advanced index. + + Returns + ------- + mask : np.ndarray + The mask. + aidxs : np.ndarray + The advanced array index. 
+ """ + mask = [] + a_indices = [] + full_idx = np.empty((len(indices) + 1, 3), dtype=np.intp) + + full_idx[:adv_idx_pos] = indices[:adv_idx_pos] + full_idx[adv_idx_pos + 1:] = indices[adv_idx_pos:] + + for i, aidx in enumerate(adv_idx): + full_idx[adv_idx_pos] = [aidx, aidx + 1, 1] + partial_mask, is_slice = _compute_mask(coords, full_idx) + if is_slice: + slice_mask = [] + for j in range(partial_mask[0], partial_mask[1]): + slice_mask.append(j) + partial_mask = np.array(slice_mask) + + mask.extend(partial_mask) + for _ in range(len(partial_mask)): + a_indices.append(i) + + return np.array(mask), np.array(a_indices) + + @numba.jit(nopython=True, nogil=True) def _compute_mask(coords, indices): # pragma: no cover """ @@ -327,8 +447,8 @@ def _get_mask_pairs(starts_old, stops_old, c, idx): # pragma: no cover # For each matching "integer" in the slice, search within the "sub-coords" # Using binary search. for p_match in range(idx[0], idx[1], idx[2]): - start = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match) + starts_old[j] - stop = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match + 1) + starts_old[j] + start = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match, side='left') + starts_old[j] + stop = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match, side='right') + starts_old[j] if start != stop: starts.append(start) @@ -413,7 +533,7 @@ def _filter_pairs(starts, stops, coords, indices): # pragma: no cover match &= ((elem - idx[0]) % idx[2] == 0 and ((idx[2] > 0 and idx[0] <= elem < idx[1]) or - (idx[2] < 0 and idx[0] >= elem > idx[1]))) + (idx[2] < 0 and idx[0] >= elem > idx[1]))) # and append to the mask if so. 
if match: @@ -460,3 +580,10 @@ def _join_adjacent_pairs(starts_old, stops_old): # pragma: no cover stops.append(stops_old[-1]) return starts, stops + + +class _AdvIdxInfo(object): + def __init__(self, idx, pos, length): + self.idx = idx + self.pos = pos + self.length = length diff --git a/sparse/slicing.py b/sparse/slicing.py index 56e5b25..087bd52 100644 --- a/sparse/slicing.py +++ b/sparse/slicing.py @@ -3,6 +3,7 @@ import math from numbers import Integral, Number +from collections import Iterable import numpy as np @@ -103,18 +104,18 @@ def check_index(ind, dimension): >>> check_index([6, 3], 5) Traceback (most recent call last): ... - IndexError: Index out of bounds 5 + IndexError: Index out of bounds for dimension 5 >>> check_index(slice(0, 3), 5) """ # unknown dimension, assumed to be in bounds - if isinstance(ind, (list, np.ndarray)): + if isinstance(ind, Iterable): x = np.asanyarray(ind) if np.issubdtype(x.dtype, np.integer) and \ - ((x >= dimension).any() or (x < -dimension).any()): - raise IndexError("Index out of bounds %s" % dimension) + ((x >= dimension) | (x < -dimension)).any(): + raise IndexError("Index out of bounds for dimension {:d}".format(dimension)) elif x.dtype == bool and len(x) != dimension: - raise IndexError("boolean index did not match indexed array; dimension is %s " - "but corresponding boolean dimension is %s", (dimension, len(x))) + raise IndexError("boolean index did not match indexed array; dimension is {:d} " + "but corresponding boolean dimension is {:d}".format(dimension, len(x))) elif isinstance(ind, slice): return elif not isinstance(ind, Integral): @@ -122,12 +123,11 @@ def check_index(ind, dimension): "integer or boolean arrays are valid indices") elif ind >= dimension: - raise IndexError("Index is not smaller than dimension %d >= %d" % - (ind, dimension)) + raise IndexError("Index is not smaller than dimension {:d} >= {:d}".format(ind, dimension)) elif ind < -dimension: - msg = "Negative index is not greater than negative 
dimension %d <= -%d" - raise IndexError(msg % (ind, dimension)) + msg = "Negative index is not greater than negative dimension {:d} <= -{:d}" + raise IndexError(msg.format(ind, dimension)) def sanitize_index(ind): @@ -157,7 +157,7 @@ def sanitize_index(ind): elif isinstance(ind, Number): return _sanitize_index_element(ind) index_array = np.asanyarray(ind) - if index_array.dtype == bool: + if index_array.dtype == np.bool_: nonzero = np.nonzero(index_array) if len(nonzero) == 1: # If a 1-element tuple, unwrap the element diff --git a/sparse/utils.py b/sparse/utils.py index db16256..f7602ea 100644 --- a/sparse/utils.py +++ b/sparse/utils.py @@ -12,7 +12,7 @@ def assert_eq(x, y, check_nnz=True, compare_dtype=True, **kwargs): if compare_dtype: assert x.dtype == y.dtype - checking_method = np.array_equal \ + check_equal = np.array_equal \ if np.issubdtype(x.dtype, np.integer) and np.issubdtype(y.dtype, np.integer) \ else functools.partial(np.allclose, equal_nan=True) @@ -22,7 +22,7 @@ def assert_eq(x, y, check_nnz=True, compare_dtype=True, **kwargs): assert is_canonical(y) if isinstance(x, COO) and isinstance(y, COO) and check_nnz: - assert np.array_equal(x.coords, y.coords) and checking_method(x.data, y.data, **kwargs) + assert np.array_equal(x.coords, y.coords) and check_equal(x.data, y.data, **kwargs) return if hasattr(x, 'todense'): @@ -37,7 +37,7 @@ def assert_eq(x, y, check_nnz=True, compare_dtype=True, **kwargs): assert_nnz(y, yy) else: yy = y - assert checking_method(xx, yy, **kwargs) + assert check_equal(xx, yy, **kwargs) def assert_nnz(s, x):
Add any and all methods. @stsievert Willing to take a crack at this? It's mostly docs/tests/copy-paste with a one-line implementation and can be easily copied from `min` or `max`: https://github.com/pydata/sparse/blob/6210da31b9fc63c01b083c121421f335d73a580d/sparse/coo/core.py#L692-L939
pydata/sparse
diff --git a/sparse/tests/test_coo.py b/sparse/tests/test_coo.py index abca280..3940dea 100644 --- a/sparse/tests/test_coo.py +++ b/sparse/tests/test_coo.py @@ -27,6 +27,20 @@ def test_reductions(reduction, axis, keepdims, kwargs, eqkwargs): assert_eq(xx, yy, **eqkwargs) [email protected]('reduction,kwargs,eqkwargs', [ + ('any', {}, {}), + ('all', {}, {}), +]) [email protected]('axis', [None, 0, 1, 2, (0, 2), -3, (1, -1)]) [email protected]('keepdims', [True, False]) +def test_reductions_bool(reduction, axis, keepdims, kwargs, eqkwargs): + x = sparse.random((2, 3, 4), density=.25).astype(bool) + y = x.todense() + xx = getattr(x, reduction)(axis=axis, keepdims=keepdims, **kwargs) + yy = getattr(y, reduction)(axis=axis, keepdims=keepdims, **kwargs) + assert_eq(xx, yy, **eqkwargs) + + @pytest.mark.parametrize('reduction,kwargs,eqkwargs', [ (np.max, {}, {}), (np.sum, {}, {}), @@ -433,25 +447,25 @@ def test_trinary_broadcasting(shapes, func): @pytest.mark.parametrize('shapes, func', [ ([ - (2,), - (3, 2), - (4, 3, 2), - ], lambda x, y, z: (x + y) * z), + (2,), + (3, 2), + (4, 3, 2), + ], lambda x, y, z: (x + y) * z), ([ - (3,), - (2, 3), - (2, 2, 3), - ], lambda x, y, z: x * (y + z)), + (3,), + (2, 3), + (2, 2, 3), + ], lambda x, y, z: x * (y + z)), ([ - (2,), - (2, 2), - (2, 2, 2), - ], lambda x, y, z: x * y * z), + (2,), + (2, 2), + (2, 2, 2), + ], lambda x, y, z: x * y * z), ([ - (4,), - (4, 4), - (4, 4, 4), - ], lambda x, y, z: x + y + z), + (4,), + (4, 4), + (4, 4, 4), + ], lambda x, y, z: x + y + z), ]) @pytest.mark.parametrize('value', [ np.nan, @@ -963,8 +977,11 @@ def test_slicing(index): (1, [2, 0], 0), ([True, False], slice(1, None), slice(-2, None)), (slice(1, None), slice(-2, None), [True, False, True, False]), + ([1, 0],), + (Ellipsis, [2, 1, 3],), + (slice(None), [2, 1, 2],), + (1, [2, 0, 1],), ]) [email protected](reason='Advanced indexing is temporarily broken.') def test_advanced_indexing(index): s = sparse.random((2, 3, 4), density=0.5) x = 
s.todense() @@ -1001,6 +1018,8 @@ def test_custom_dtype_slicing(): 0.5, [0.5], {'potato': 'kartoffel'}, + ([0, 1],) * 2, + ([[0, 1]],), ]) def test_slicing_errors(index): s = sparse.random((2, 3, 4), density=0.5)
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 7 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-flake8", "pytest-cov" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 asv==0.5.1 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 distlib==0.3.9 docutils==0.17.1 filelock==3.4.1 flake8==5.0.4 idna==3.10 imagesize==1.4.1 importlib-metadata==4.2.0 importlib-resources==5.4.0 iniconfig==1.1.1 Jinja2==3.0.3 llvmlite==0.36.0 MarkupSafe==2.0.1 mccabe==0.7.0 numba==0.53.1 numpy==1.19.5 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-flake8==1.1.1 pytz==2025.2 requests==2.27.1 scipy==1.5.4 six==1.17.0 snowballstemmer==2.2.0 -e git+https://github.com/pydata/sparse.git@6cb64a5246cf07a59aca21f942602655557f5111#egg=sparse Sphinx==4.3.2 sphinx-rtd-theme==1.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.16.2 zipp==3.6.0
name: sparse channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - asv==0.5.1 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - coverage==6.2 - distlib==0.3.9 - docutils==0.17.1 - filelock==3.4.1 - flake8==5.0.4 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.2.0 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jinja2==3.0.3 - llvmlite==0.36.0 - markupsafe==2.0.1 - mccabe==0.7.0 - numba==0.53.1 - numpy==1.19.5 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-flake8==1.1.1 - pytz==2025.2 - requests==2.27.1 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==4.3.2 - sphinx-rtd-theme==1.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.16.2 - zipp==3.6.0 prefix: /opt/conda/envs/sparse
[ "sparse/coo/core.py::sparse.coo.core.COO.all", "sparse/coo/core.py::sparse.coo.core.COO.any", "sparse/coo/indexing.py::sparse.coo.indexing._ind_ar_from_indices", "sparse/tests/test_coo.py::test_reductions_bool[True-None-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-None-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-0-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-0-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-1-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-1-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-2-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-2-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-axis4-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-axis4-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True--3-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True--3-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[True-axis6-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[True-axis6-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-None-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-None-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-0-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-0-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-1-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-1-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-2-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-2-all-kwargs1-eqkwargs1]", 
"sparse/tests/test_coo.py::test_reductions_bool[False-axis4-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-axis4-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False--3-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False--3-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions_bool[False-axis6-any-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions_bool[False-axis6-all-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_advanced_indexing[index0]", "sparse/tests/test_coo.py::test_advanced_indexing[index1]", "sparse/tests/test_coo.py::test_advanced_indexing[index2]", "sparse/tests/test_coo.py::test_advanced_indexing[index3]", "sparse/tests/test_coo.py::test_advanced_indexing[index4]", "sparse/tests/test_coo.py::test_advanced_indexing[index5]", "sparse/tests/test_coo.py::test_advanced_indexing[index6]", "sparse/tests/test_coo.py::test_advanced_indexing[index7]", "sparse/tests/test_coo.py::test_advanced_indexing[index8]", "sparse/tests/test_coo.py::test_advanced_indexing[index9]", "sparse/tests/test_coo.py::test_slicing_errors[index10]", "sparse/tests/test_coo.py::test_slicing_errors[index11]", "sparse/tests/test_dok.py::test_setitem[shape0-index0-0.5249336378806507]", "sparse/tests/test_dok.py::test_setitem[shape1-index1-0.8734346827674154]", "sparse/tests/test_dok.py::test_setitem[shape3-1-0.09148192805554167]", "sparse/tests/test_dok.py::test_setitem[shape4-index4-0.895175738228745]", "sparse/tests/test_dok.py::test_setitem[shape5-index5-0.7039365189808376]", "sparse/tests/test_dok.py::test_setitem[shape9-index9-0.9812604441232181]", "sparse/tests/test_dok.py::test_setitem[shape11-index11-0.45712861544745875]", "sparse/tests/test_dok.py::test_setitem[shape13-index13-0.7306825883567905]" ]
[ "sparse/__init__.py::flake-8::FLAKE8", "sparse/_version.py::flake-8::FLAKE8", "sparse/compatibility.py::flake-8::FLAKE8", "sparse/dok.py::flake-8::FLAKE8", "sparse/io.py::flake-8::FLAKE8", "sparse/slicing.py::flake-8::FLAKE8", "sparse/sparse_array.py::flake-8::FLAKE8", "sparse/utils.py::flake-8::FLAKE8", "sparse/coo/__init__.py::flake-8::FLAKE8", "sparse/coo/common.py::flake-8::FLAKE8", "sparse/coo/core.py::flake-8::FLAKE8", "sparse/coo/indexing.py::flake-8::FLAKE8", "sparse/coo/umath.py::flake-8::FLAKE8", "sparse/tests/test_coo.py::flake-8::FLAKE8", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[func2]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[func3]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[func4]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[func5]", "sparse/tests/test_dok.py::flake-8::FLAKE8", "sparse/tests/test_io.py::flake-8::FLAKE8" ]
[ "sparse/dok.py::sparse.dok.DOK", "sparse/dok.py::sparse.dok.DOK.from_coo", "sparse/dok.py::sparse.dok.DOK.from_numpy", "sparse/dok.py::sparse.dok.DOK.nnz", "sparse/dok.py::sparse.dok.DOK.to_coo", "sparse/dok.py::sparse.dok.DOK.todense", "sparse/io.py::sparse.io.save_npz", "sparse/slicing.py::sparse.slicing.check_index", "sparse/slicing.py::sparse.slicing.clip_slice", "sparse/slicing.py::sparse.slicing.normalize_index", "sparse/slicing.py::sparse.slicing.posify_index", "sparse/slicing.py::sparse.slicing.replace_ellipsis", "sparse/slicing.py::sparse.slicing.replace_none", "sparse/slicing.py::sparse.slicing.sanitize_index", "sparse/sparse_array.py::sparse.sparse_array.SparseArray.density", "sparse/sparse_array.py::sparse.sparse_array.SparseArray.ndim", "sparse/sparse_array.py::sparse.sparse_array.SparseArray.nnz", "sparse/sparse_array.py::sparse.sparse_array.SparseArray.size", "sparse/utils.py::sparse.utils.check_consistent_fill_value", "sparse/utils.py::sparse.utils.check_zero_fill_value", "sparse/utils.py::sparse.utils.equivalent", "sparse/utils.py::sparse.utils.random", "sparse/coo/core.py::sparse.coo.core.COO", "sparse/coo/core.py::sparse.coo.core.COO.T", "sparse/coo/core.py::sparse.coo.core.COO.__len__", "sparse/coo/core.py::sparse.coo.core.COO._sort_indices", "sparse/coo/core.py::sparse.coo.core.COO._sum_duplicates", "sparse/coo/core.py::sparse.coo.core.COO.dot", "sparse/coo/core.py::sparse.coo.core.COO.dtype", "sparse/coo/core.py::sparse.coo.core.COO.from_iter", "sparse/coo/core.py::sparse.coo.core.COO.from_numpy", "sparse/coo/core.py::sparse.coo.core.COO.from_scipy_sparse", "sparse/coo/core.py::sparse.coo.core.COO.linear_loc", "sparse/coo/core.py::sparse.coo.core.COO.max", "sparse/coo/core.py::sparse.coo.core.COO.maybe_densify", "sparse/coo/core.py::sparse.coo.core.COO.min", "sparse/coo/core.py::sparse.coo.core.COO.nbytes", "sparse/coo/core.py::sparse.coo.core.COO.nnz", "sparse/coo/core.py::sparse.coo.core.COO.nonzero", 
"sparse/coo/core.py::sparse.coo.core.COO.prod", "sparse/coo/core.py::sparse.coo.core.COO.reduce", "sparse/coo/core.py::sparse.coo.core.COO.reshape", "sparse/coo/core.py::sparse.coo.core.COO.sum", "sparse/coo/core.py::sparse.coo.core.COO.todense", "sparse/coo/core.py::sparse.coo.core.COO.transpose", "sparse/coo/indexing.py::sparse.coo.indexing._compute_mask", "sparse/coo/indexing.py::sparse.coo.indexing._filter_pairs", "sparse/coo/indexing.py::sparse.coo.indexing._get_mask_pairs", "sparse/coo/indexing.py::sparse.coo.indexing._get_slice_len", "sparse/coo/indexing.py::sparse.coo.indexing._join_adjacent_pairs", "sparse/coo/indexing.py::sparse.coo.indexing._prune_indices", "sparse/tests/test_coo.py::test_reductions[True-None-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-None-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-None-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-None-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-None-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-0-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-0-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-0-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-0-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-0-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-1-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-1-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-2-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-2-sum-kwargs1-eqkwargs1]", 
"sparse/tests/test_coo.py::test_reductions[True-2-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-2-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-2-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-axis4-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-axis4-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-axis4-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-axis4-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-axis4-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True--3-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True--3-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True--3-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True--3-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True--3-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[True-axis6-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[True-axis6-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[True-axis6-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[True-axis6-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[True-axis6-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-None-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-None-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-None-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-None-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-None-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-0-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-0-sum-kwargs1-eqkwargs1]", 
"sparse/tests/test_coo.py::test_reductions[False-0-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-0-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-0-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-1-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-1-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-2-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-2-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-2-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-2-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-2-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-axis4-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-axis4-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False-axis4-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-axis4-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-axis4-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False--3-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False--3-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_reductions[False--3-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False--3-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False--3-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_reductions[False-axis6-max-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_reductions[False-axis6-sum-kwargs1-eqkwargs1]", 
"sparse/tests/test_coo.py::test_reductions[False-axis6-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_reductions[False-axis6-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_reductions[False-axis6-min-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-None-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-0-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-1-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-2-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-amax-kwargs0-eqkwargs0]", 
"sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis4-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True--1-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[True-axis6-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-None-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-0-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-0-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-0-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-0-prod-kwargs3-eqkwargs3]", 
"sparse/tests/test_coo.py::test_ufunc_reductions[False-0-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-1-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-2-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis4-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-sum-kwargs1-eqkwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False--1-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-amax-kwargs0-eqkwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-sum-kwargs1-eqkwargs1]", 
"sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-sum-kwargs2-eqkwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-prod-kwargs3-eqkwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions[False-axis6-amin-kwargs4-eqkwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[amax-kwargs0]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[sum-kwargs1]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[prod-kwargs2]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs3]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs4]", "sparse/tests/test_coo.py::test_ufunc_reductions_kwargs[reduce-kwargs5]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-None-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-0-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.25-False-1-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-None-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanprod]", 
"sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-0-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.5-False-1-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-None-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-0-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[0.75-False-1-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-None-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanprod]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-0-nanmin]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nansum]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanprod]", 
"sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanmax]", "sparse/tests/test_coo.py::test_nan_reductions[1.0-False-1-nanmin]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[None-nanmax]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[None-nanmin]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[0-nanmax]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[0-nanmin]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[1-nanmax]", "sparse/tests/test_coo.py::test_all_nan_reduction_warning[1-nanmin]", "sparse/tests/test_coo.py::test_transpose[None]", "sparse/tests/test_coo.py::test_transpose[axis1]", "sparse/tests/test_coo.py::test_transpose[axis2]", "sparse/tests/test_coo.py::test_transpose[axis3]", "sparse/tests/test_coo.py::test_transpose[axis4]", "sparse/tests/test_coo.py::test_transpose[axis5]", "sparse/tests/test_coo.py::test_transpose[axis6]", "sparse/tests/test_coo.py::test_transpose_error[axis0]", "sparse/tests/test_coo.py::test_transpose_error[axis1]", "sparse/tests/test_coo.py::test_transpose_error[axis2]", "sparse/tests/test_coo.py::test_transpose_error[axis3]", "sparse/tests/test_coo.py::test_transpose_error[axis4]", "sparse/tests/test_coo.py::test_transpose_error[axis5]", "sparse/tests/test_coo.py::test_transpose_error[0.3]", "sparse/tests/test_coo.py::test_transpose_error[axis7]", "sparse/tests/test_coo.py::test_reshape[a0-b0]", "sparse/tests/test_coo.py::test_reshape[a1-b1]", "sparse/tests/test_coo.py::test_reshape[a2-b2]", "sparse/tests/test_coo.py::test_reshape[a3-b3]", "sparse/tests/test_coo.py::test_reshape[a4-b4]", "sparse/tests/test_coo.py::test_reshape[a5-b5]", "sparse/tests/test_coo.py::test_reshape[a6-b6]", "sparse/tests/test_coo.py::test_reshape[a7-b7]", "sparse/tests/test_coo.py::test_reshape[a8-b8]", "sparse/tests/test_coo.py::test_reshape[a9-b9]", "sparse/tests/test_coo.py::test_large_reshape", "sparse/tests/test_coo.py::test_reshape_same", 
"sparse/tests/test_coo.py::test_to_scipy_sparse", "sparse/tests/test_coo.py::test_tensordot[a_shape0-b_shape0-axes0]", "sparse/tests/test_coo.py::test_tensordot[a_shape1-b_shape1-axes1]", "sparse/tests/test_coo.py::test_tensordot[a_shape2-b_shape2-axes2]", "sparse/tests/test_coo.py::test_tensordot[a_shape3-b_shape3-axes3]", "sparse/tests/test_coo.py::test_tensordot[a_shape4-b_shape4-axes4]", "sparse/tests/test_coo.py::test_tensordot[a_shape5-b_shape5-axes5]", "sparse/tests/test_coo.py::test_tensordot[a_shape6-b_shape6-axes6]", "sparse/tests/test_coo.py::test_tensordot[a_shape7-b_shape7-axes7]", "sparse/tests/test_coo.py::test_tensordot[a_shape8-b_shape8-axes8]", "sparse/tests/test_coo.py::test_tensordot[a_shape9-b_shape9-0]", "sparse/tests/test_coo.py::test_dot[a_shape0-b_shape0]", "sparse/tests/test_coo.py::test_dot[a_shape1-b_shape1]", "sparse/tests/test_coo.py::test_dot[a_shape2-b_shape2]", "sparse/tests/test_coo.py::test_dot[a_shape3-b_shape3]", "sparse/tests/test_coo.py::test_dot[a_shape4-b_shape4]", "sparse/tests/test_coo.py::test_elemwise[expm1]", "sparse/tests/test_coo.py::test_elemwise[log1p]", "sparse/tests/test_coo.py::test_elemwise[sin]", "sparse/tests/test_coo.py::test_elemwise[tan]", "sparse/tests/test_coo.py::test_elemwise[sinh]", "sparse/tests/test_coo.py::test_elemwise[tanh]", "sparse/tests/test_coo.py::test_elemwise[floor]", "sparse/tests/test_coo.py::test_elemwise[ceil]", "sparse/tests/test_coo.py::test_elemwise[sqrt]", "sparse/tests/test_coo.py::test_elemwise[conjugate0]", "sparse/tests/test_coo.py::test_elemwise[round_]", "sparse/tests/test_coo.py::test_elemwise[rint]", "sparse/tests/test_coo.py::test_elemwise[<lambda>0]", "sparse/tests/test_coo.py::test_elemwise[conjugate1]", "sparse/tests/test_coo.py::test_elemwise[conjugate2]", "sparse/tests/test_coo.py::test_elemwise[<lambda>1]", "sparse/tests/test_coo.py::test_elemwise[abs]", "sparse/tests/test_coo.py::test_elemwise_inplace[expm1]", "sparse/tests/test_coo.py::test_elemwise_inplace[log1p]", 
"sparse/tests/test_coo.py::test_elemwise_inplace[sin]", "sparse/tests/test_coo.py::test_elemwise_inplace[tan]", "sparse/tests/test_coo.py::test_elemwise_inplace[sinh]", "sparse/tests/test_coo.py::test_elemwise_inplace[tanh]", "sparse/tests/test_coo.py::test_elemwise_inplace[floor]", "sparse/tests/test_coo.py::test_elemwise_inplace[ceil]", "sparse/tests/test_coo.py::test_elemwise_inplace[sqrt]", "sparse/tests/test_coo.py::test_elemwise_inplace[conjugate0]", "sparse/tests/test_coo.py::test_elemwise_inplace[round_]", "sparse/tests/test_coo.py::test_elemwise_inplace[rint]", "sparse/tests/test_coo.py::test_elemwise_inplace[conjugate1]", "sparse/tests/test_coo.py::test_elemwise_inplace[conjugate2]", "sparse/tests/test_coo.py::test_elemwise_inplace[<lambda>]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-mul]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-add]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-sub]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-gt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-lt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape0-ne]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-mul]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-add]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-sub]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-gt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-lt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape1-ne]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-mul]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-add]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-sub]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-gt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-lt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape2-ne]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-mul]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-add]", 
"sparse/tests/test_coo.py::test_elemwise_binary[shape3-sub]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-gt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-lt]", "sparse/tests/test_coo.py::test_elemwise_binary[shape3-ne]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape0-imul]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape0-iadd]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape0-isub]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape1-imul]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape1-iadd]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape1-isub]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape2-imul]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape2-iadd]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape2-isub]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape3-imul]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape3-iadd]", "sparse/tests/test_coo.py::test_elemwise_binary_inplace[shape3-isub]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>0]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>1]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>2]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape0-<lambda>3]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>0]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>1]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>2]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape1-<lambda>3]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>0]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>1]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>2]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape2-<lambda>3]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>0]", 
"sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>1]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>2]", "sparse/tests/test_coo.py::test_elemwise_trinary[shape3-<lambda>3]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape10-shape20-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape10-shape20-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape11-shape21-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape11-shape21-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape12-shape22-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape12-shape22-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape13-shape23-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape13-shape23-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape14-shape24-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape14-shape24-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape15-shape25-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape15-shape25-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape16-shape26-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape16-shape26-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape17-shape27-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape17-shape27-mul]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape18-shape28-add]", "sparse/tests/test_coo.py::test_binary_broadcasting[shape18-shape28-mul]", "sparse/tests/test_coo.py::test_broadcast_to[shape10-shape20]", "sparse/tests/test_coo.py::test_broadcast_to[shape11-shape21]", "sparse/tests/test_coo.py::test_broadcast_to[shape12-shape22]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes2]", 
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>0-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>1-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>2-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes5]", 
"sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>3-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>4-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes0]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes1]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes2]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes3]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes4]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes5]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes6]", "sparse/tests/test_coo.py::test_trinary_broadcasting[<lambda>5-shapes7]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-nan-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes2-<lambda>]", 
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25-inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.25--inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-nan-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5-inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.5--inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-nan-shapes3-<lambda>]", 
"sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75-inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[0.75--inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-nan-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0-inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes0-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes1-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes2-<lambda>]", "sparse/tests/test_coo.py::test_trinary_broadcasting_pathological[1.0--inf-shapes3-<lambda>]", "sparse/tests/test_coo.py::test_sparse_broadcasting", 
"sparse/tests/test_coo.py::test_dense_broadcasting", "sparse/tests/test_coo.py::test_sparsearray_elemwise[coo]", "sparse/tests/test_coo.py::test_sparsearray_elemwise[dok]", "sparse/tests/test_coo.py::test_ndarray_densification_fails", "sparse/tests/test_coo.py::test_elemwise_noargs", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[pow]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[truediv]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[floordiv]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[ge]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[le]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[eq]", "sparse/tests/test_coo.py::test_nonzero_outout_fv_ufunc[mod]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-mul-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-add-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-sub-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-pow-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-truediv-3]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-floordiv-4]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-gt-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-lt--5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-ne-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-ge-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-le--3]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-eq-1]", "sparse/tests/test_coo.py::test_elemwise_scalar[True-mod-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-mul-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-add-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-sub-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-pow-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-truediv-3]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-floordiv-4]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-gt-5]", 
"sparse/tests/test_coo.py::test_elemwise_scalar[False-lt--5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-ne-0]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-ge-5]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-le--3]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-eq-1]", "sparse/tests/test_coo.py::test_elemwise_scalar[False-mod-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-mul-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-add-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-sub-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-gt--5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-lt-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-ne-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-ge--5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-le-3]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[True-eq-1]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-mul-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-add-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-sub-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-gt--5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-lt-5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-ne-0]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-ge--5]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-le-3]", "sparse/tests/test_coo.py::test_leftside_elemwise_scalar[False-eq-1]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[add-5]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[sub--5]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[pow--3]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[truediv-0]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[floordiv-0]", 
"sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[gt--5]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[lt-5]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[ne-1]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[ge--3]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[le-3]", "sparse/tests/test_coo.py::test_scalar_output_nonzero_fv[eq-0]", "sparse/tests/test_coo.py::test_bitwise_binary[shape0-and_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape0-or_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape0-xor]", "sparse/tests/test_coo.py::test_bitwise_binary[shape1-and_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape1-or_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape1-xor]", "sparse/tests/test_coo.py::test_bitwise_binary[shape2-and_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape2-or_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape2-xor]", "sparse/tests/test_coo.py::test_bitwise_binary[shape3-and_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape3-or_]", "sparse/tests/test_coo.py::test_bitwise_binary[shape3-xor]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape0-iand]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape0-ior]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape0-ixor]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape1-iand]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape1-ior]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape1-ixor]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape2-iand]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape2-ior]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape2-ixor]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape3-iand]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape3-ior]", "sparse/tests/test_coo.py::test_bitwise_binary_inplace[shape3-ixor]", 
"sparse/tests/test_coo.py::test_bitshift_binary[shape0-lshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape0-rshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape1-lshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape1-rshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape2-lshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape2-rshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape3-lshift]", "sparse/tests/test_coo.py::test_bitshift_binary[shape3-rshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape0-ilshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape0-irshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape1-ilshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape1-irshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape2-ilshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape2-irshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape3-ilshift]", "sparse/tests/test_coo.py::test_bitshift_binary_inplace[shape3-irshift]", "sparse/tests/test_coo.py::test_bitwise_scalar[shape0-and_]", "sparse/tests/test_coo.py::test_bitwise_scalar[shape1-and_]", "sparse/tests/test_coo.py::test_bitwise_scalar[shape2-and_]", "sparse/tests/test_coo.py::test_bitwise_scalar[shape3-and_]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape0-lshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape0-rshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape1-lshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape1-rshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape2-lshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape2-rshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape3-lshift]", "sparse/tests/test_coo.py::test_bitshift_scalar[shape3-rshift]", "sparse/tests/test_coo.py::test_unary_bitwise_nonzero_output_fv[shape0-invert]", 
"sparse/tests/test_coo.py::test_unary_bitwise_nonzero_output_fv[shape1-invert]", "sparse/tests/test_coo.py::test_unary_bitwise_nonzero_output_fv[shape2-invert]", "sparse/tests/test_coo.py::test_unary_bitwise_nonzero_output_fv[shape3-invert]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape0-or_]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape0-xor]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape1-or_]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape1-xor]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape2-or_]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape2-xor]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape3-or_]", "sparse/tests/test_coo.py::test_binary_bitwise_nonzero_output_fv[shape3-xor]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-mul]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-add]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-sub]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-gt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-lt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape0-ne]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-mul]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-add]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-sub]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-gt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-lt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape1-ne]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-mul]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-add]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-sub]", 
"sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-gt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-lt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape2-ne]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-mul]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-add]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-sub]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-gt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-lt]", "sparse/tests/test_coo.py::test_elemwise_nonzero_input_fv[shape3-ne]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape0-lshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape0-rshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape1-lshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape1-rshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape2-lshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape2-rshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape3-lshift]", "sparse/tests/test_coo.py::test_binary_bitshift_densification_fails[shape3-rshift]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-and_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-or_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape0-xor]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-and_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-or_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape1-xor]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-and_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-or_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape2-xor]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-and_]", 
"sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-or_]", "sparse/tests/test_coo.py::test_bitwise_binary_bool[shape3-xor]", "sparse/tests/test_coo.py::test_elemwise_binary_empty", "sparse/tests/test_coo.py::test_gt", "sparse/tests/test_coo.py::test_slicing[0]", "sparse/tests/test_coo.py::test_slicing[1]", "sparse/tests/test_coo.py::test_slicing[-1]", "sparse/tests/test_coo.py::test_slicing[index3]", "sparse/tests/test_coo.py::test_slicing[index4]", "sparse/tests/test_coo.py::test_slicing[index5]", "sparse/tests/test_coo.py::test_slicing[index6]", "sparse/tests/test_coo.py::test_slicing[index7]", "sparse/tests/test_coo.py::test_slicing[index8]", "sparse/tests/test_coo.py::test_slicing[index9]", "sparse/tests/test_coo.py::test_slicing[index10]", "sparse/tests/test_coo.py::test_slicing[index11]", "sparse/tests/test_coo.py::test_slicing[index12]", "sparse/tests/test_coo.py::test_slicing[index13]", "sparse/tests/test_coo.py::test_slicing[index14]", "sparse/tests/test_coo.py::test_slicing[index15]", "sparse/tests/test_coo.py::test_slicing[index16]", "sparse/tests/test_coo.py::test_slicing[index17]", "sparse/tests/test_coo.py::test_slicing[index18]", "sparse/tests/test_coo.py::test_slicing[index19]", "sparse/tests/test_coo.py::test_slicing[index20]", "sparse/tests/test_coo.py::test_slicing[index21]", "sparse/tests/test_coo.py::test_slicing[index22]", "sparse/tests/test_coo.py::test_slicing[index23]", "sparse/tests/test_coo.py::test_slicing[index24]", "sparse/tests/test_coo.py::test_slicing[index25]", "sparse/tests/test_coo.py::test_slicing[index26]", "sparse/tests/test_coo.py::test_slicing[index27]", "sparse/tests/test_coo.py::test_slicing[index28]", "sparse/tests/test_coo.py::test_slicing[index29]", "sparse/tests/test_coo.py::test_slicing[index30]", "sparse/tests/test_coo.py::test_slicing[index31]", "sparse/tests/test_coo.py::test_slicing[index32]", "sparse/tests/test_coo.py::test_slicing[index33]", "sparse/tests/test_coo.py::test_slicing[index34]", 
"sparse/tests/test_coo.py::test_slicing[index35]", "sparse/tests/test_coo.py::test_slicing[index36]", "sparse/tests/test_coo.py::test_slicing[index37]", "sparse/tests/test_coo.py::test_slicing[index38]", "sparse/tests/test_coo.py::test_slicing[index39]", "sparse/tests/test_coo.py::test_slicing[index40]", "sparse/tests/test_coo.py::test_slicing[index41]", "sparse/tests/test_coo.py::test_slicing[index42]", "sparse/tests/test_coo.py::test_slicing[index43]", "sparse/tests/test_coo.py::test_slicing[index44]", "sparse/tests/test_coo.py::test_slicing[index45]", "sparse/tests/test_coo.py::test_custom_dtype_slicing", "sparse/tests/test_coo.py::test_slicing_errors[index0]", "sparse/tests/test_coo.py::test_slicing_errors[index1]", "sparse/tests/test_coo.py::test_slicing_errors[index2]", "sparse/tests/test_coo.py::test_slicing_errors[5]", "sparse/tests/test_coo.py::test_slicing_errors[-5]", "sparse/tests/test_coo.py::test_slicing_errors[foo]", "sparse/tests/test_coo.py::test_slicing_errors[index6]", "sparse/tests/test_coo.py::test_slicing_errors[0.5]", "sparse/tests/test_coo.py::test_slicing_errors[index8]", "sparse/tests/test_coo.py::test_slicing_errors[index9]", "sparse/tests/test_coo.py::test_concatenate", "sparse/tests/test_coo.py::test_concatenate_mixed[stack-0]", "sparse/tests/test_coo.py::test_concatenate_mixed[stack-1]", "sparse/tests/test_coo.py::test_concatenate_mixed[concatenate-0]", "sparse/tests/test_coo.py::test_concatenate_mixed[concatenate-1]", "sparse/tests/test_coo.py::test_concatenate_noarrays", "sparse/tests/test_coo.py::test_stack[0-shape0]", "sparse/tests/test_coo.py::test_stack[0-shape1]", "sparse/tests/test_coo.py::test_stack[0-shape2]", "sparse/tests/test_coo.py::test_stack[1-shape0]", "sparse/tests/test_coo.py::test_stack[1-shape1]", "sparse/tests/test_coo.py::test_stack[1-shape2]", "sparse/tests/test_coo.py::test_stack[-1-shape0]", "sparse/tests/test_coo.py::test_stack[-1-shape1]", "sparse/tests/test_coo.py::test_stack[-1-shape2]", 
"sparse/tests/test_coo.py::test_large_concat_stack", "sparse/tests/test_coo.py::test_addition", "sparse/tests/test_coo.py::test_scalar_multiplication[2]", "sparse/tests/test_coo.py::test_scalar_multiplication[2.5]", "sparse/tests/test_coo.py::test_scalar_multiplication[scalar2]", "sparse/tests/test_coo.py::test_scalar_multiplication[scalar3]", "sparse/tests/test_coo.py::test_scalar_exponentiation", "sparse/tests/test_coo.py::test_create_with_lists_of_tuples", "sparse/tests/test_coo.py::test_sizeof", "sparse/tests/test_coo.py::test_scipy_sparse_interface", "sparse/tests/test_coo.py::test_scipy_sparse_interaction[coo]", "sparse/tests/test_coo.py::test_scipy_sparse_interaction[csr]", "sparse/tests/test_coo.py::test_scipy_sparse_interaction[dok]", "sparse/tests/test_coo.py::test_scipy_sparse_interaction[csc]", "sparse/tests/test_coo.py::test_op_scipy_sparse[mul]", "sparse/tests/test_coo.py::test_op_scipy_sparse[add]", "sparse/tests/test_coo.py::test_op_scipy_sparse[sub]", "sparse/tests/test_coo.py::test_op_scipy_sparse[gt]", "sparse/tests/test_coo.py::test_op_scipy_sparse[lt]", "sparse/tests/test_coo.py::test_op_scipy_sparse[ne]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[add]", "sparse/tests/test_coo.py::test_op_scipy_sparse_left[sub]", "sparse/tests/test_coo.py::test_cache_csr", "sparse/tests/test_coo.py::test_empty_shape", "sparse/tests/test_coo.py::test_single_dimension", "sparse/tests/test_coo.py::test_large_sum", "sparse/tests/test_coo.py::test_add_many_sparse_arrays", "sparse/tests/test_coo.py::test_caching", "sparse/tests/test_coo.py::test_scalar_slicing", "sparse/tests/test_coo.py::test_triul[shape0-0]", "sparse/tests/test_coo.py::test_triul[shape1-1]", "sparse/tests/test_coo.py::test_triul[shape2--1]", "sparse/tests/test_coo.py::test_triul[shape3--2]", "sparse/tests/test_coo.py::test_triul[shape4-1000]", "sparse/tests/test_coo.py::test_empty_reduction", "sparse/tests/test_coo.py::test_random_shape[0.1-shape0]", 
"sparse/tests/test_coo.py::test_random_shape[0.1-shape1]", "sparse/tests/test_coo.py::test_random_shape[0.1-shape2]", "sparse/tests/test_coo.py::test_random_shape[0.3-shape0]", "sparse/tests/test_coo.py::test_random_shape[0.3-shape1]", "sparse/tests/test_coo.py::test_random_shape[0.3-shape2]", "sparse/tests/test_coo.py::test_random_shape[0.5-shape0]", "sparse/tests/test_coo.py::test_random_shape[0.5-shape1]", "sparse/tests/test_coo.py::test_random_shape[0.5-shape2]", "sparse/tests/test_coo.py::test_random_shape[0.7-shape0]", "sparse/tests/test_coo.py::test_random_shape[0.7-shape1]", "sparse/tests/test_coo.py::test_random_shape[0.7-shape2]", "sparse/tests/test_coo.py::test_two_random_unequal", "sparse/tests/test_coo.py::test_two_random_same_seed", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape0-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.0-shape1-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape0-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.01-shape1-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape0-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.1-shape1-<lambda>-bool]", 
"sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape0-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-None-float64]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-rvs-int]", "sparse/tests/test_coo.py::test_random_rvs[0.2-shape1-<lambda>-bool]", "sparse/tests/test_coo.py::test_random_fv[coo]", "sparse/tests/test_coo.py::test_random_fv[dok]", "sparse/tests/test_coo.py::test_scalar_shape_construction", "sparse/tests/test_coo.py::test_len", "sparse/tests/test_coo.py::test_density", "sparse/tests/test_coo.py::test_size", "sparse/tests/test_coo.py::test_np_array", "sparse/tests/test_coo.py::test_three_arg_where[shapes0]", "sparse/tests/test_coo.py::test_three_arg_where[shapes1]", "sparse/tests/test_coo.py::test_three_arg_where[shapes2]", "sparse/tests/test_coo.py::test_three_arg_where[shapes3]", "sparse/tests/test_coo.py::test_three_arg_where[shapes4]", "sparse/tests/test_coo.py::test_three_arg_where[shapes5]", "sparse/tests/test_coo.py::test_three_arg_where[shapes6]", "sparse/tests/test_coo.py::test_three_arg_where[shapes7]", "sparse/tests/test_coo.py::test_one_arg_where", "sparse/tests/test_coo.py::test_one_arg_where_dense", "sparse/tests/test_coo.py::test_two_arg_where", "sparse/tests/test_coo.py::test_inplace_invalid_shape[imul]", "sparse/tests/test_coo.py::test_inplace_invalid_shape[iadd]", "sparse/tests/test_coo.py::test_inplace_invalid_shape[isub]", "sparse/tests/test_coo.py::test_nonzero", "sparse/tests/test_coo.py::test_argwhere", "sparse/tests/test_coo.py::test_asformat[coo]", "sparse/tests/test_coo.py::test_asformat[dok]", "sparse/tests/test_coo.py::test_as_coo[COO]", "sparse/tests/test_coo.py::test_as_coo[DOK]", "sparse/tests/test_coo.py::test_as_coo[csr_matrix]", "sparse/tests/test_coo.py::test_as_coo[asarray]", "sparse/tests/test_coo.py::test_invalid_shape_error", 
"sparse/tests/test_coo.py::test_invalid_iterable_error", "sparse/tests/test_coo.py::TestRoll::test_1d[0]", "sparse/tests/test_coo.py::TestRoll::test_1d[2]", "sparse/tests/test_coo.py::TestRoll::test_1d[-2]", "sparse/tests/test_coo.py::TestRoll::test_1d[20]", "sparse/tests/test_coo.py::TestRoll::test_1d[-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[None-0]", "sparse/tests/test_coo.py::TestRoll::test_2d[None-2]", "sparse/tests/test_coo.py::TestRoll::test_2d[None--2]", "sparse/tests/test_coo.py::TestRoll::test_2d[None-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[None--20]", "sparse/tests/test_coo.py::TestRoll::test_2d[0-0]", "sparse/tests/test_coo.py::TestRoll::test_2d[0-2]", "sparse/tests/test_coo.py::TestRoll::test_2d[0--2]", "sparse/tests/test_coo.py::TestRoll::test_2d[0-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[0--20]", "sparse/tests/test_coo.py::TestRoll::test_2d[1-0]", "sparse/tests/test_coo.py::TestRoll::test_2d[1-2]", "sparse/tests/test_coo.py::TestRoll::test_2d[1--2]", "sparse/tests/test_coo.py::TestRoll::test_2d[1-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[1--20]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3-0]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3-2]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3--2]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3-20]", "sparse/tests/test_coo.py::TestRoll::test_2d[ax3--20]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax0-shift0]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax0-shift1]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax0-shift2]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax0-shift3]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax1-shift0]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax1-shift1]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax1-shift2]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax1-shift3]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax2-shift0]", 
"sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax2-shift1]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax2-shift2]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax2-shift3]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax3-shift0]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax3-shift1]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax3-shift2]", "sparse/tests/test_coo.py::TestRoll::test_multiaxis[ax3-shift3]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None-0]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None-2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None--2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None-20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[None--20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0-0]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0-2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0--2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0-20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[0--20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1-0]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1-2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1--2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1-20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[1--20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3-0]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3-2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3--2]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3-20]", "sparse/tests/test_coo.py::TestRoll::test_original_is_copied[ax3--20]", "sparse/tests/test_coo.py::TestRoll::test_empty", "sparse/tests/test_coo.py::TestRoll::test_valerr[args0]", 
"sparse/tests/test_coo.py::TestRoll::test_valerr[args1]", "sparse/tests/test_coo.py::TestRoll::test_valerr[args2]", "sparse/tests/test_coo.py::TestRoll::test_valerr[args3]", "sparse/tests/test_coo.py::TestFailFillValue::test_nonzero_fv", "sparse/tests/test_coo.py::TestFailFillValue::test_inconsistent_fv", "sparse/tests/test_coo.py::test_initialization[2]", "sparse/tests/test_coo.py::test_initialization[3]", "sparse/tests/test_coo.py::test_initialization[4]", "sparse/tests/test_coo.py::test_initialization[5]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape0]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape1]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.1-shape2]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape0]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape1]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.3-shape2]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape0]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape1]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.5-shape2]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape0]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape1]", "sparse/tests/test_dok.py::test_random_shape_nnz[0.7-shape2]", "sparse/tests/test_dok.py::test_convert_to_coo", "sparse/tests/test_dok.py::test_convert_from_coo", "sparse/tests/test_dok.py::test_convert_from_numpy", "sparse/tests/test_dok.py::test_convert_to_numpy", "sparse/tests/test_dok.py::test_construct[2-data0]", "sparse/tests/test_dok.py::test_construct[shape1-data1]", "sparse/tests/test_dok.py::test_construct[shape2-data2]", "sparse/tests/test_dok.py::test_getitem[0.1-shape0]", "sparse/tests/test_dok.py::test_getitem[0.1-shape1]", "sparse/tests/test_dok.py::test_getitem[0.1-shape2]", "sparse/tests/test_dok.py::test_getitem[0.3-shape0]", "sparse/tests/test_dok.py::test_getitem[0.3-shape1]", "sparse/tests/test_dok.py::test_getitem[0.3-shape2]", 
"sparse/tests/test_dok.py::test_getitem[0.5-shape0]", "sparse/tests/test_dok.py::test_getitem[0.5-shape1]", "sparse/tests/test_dok.py::test_getitem[0.5-shape2]", "sparse/tests/test_dok.py::test_getitem[0.7-shape0]", "sparse/tests/test_dok.py::test_getitem[0.7-shape1]", "sparse/tests/test_dok.py::test_getitem[0.7-shape2]", "sparse/tests/test_dok.py::test_setitem[shape2-index2-value2]", "sparse/tests/test_dok.py::test_setitem[shape6-index6-value6]", "sparse/tests/test_dok.py::test_setitem[shape7-index7-value7]", "sparse/tests/test_dok.py::test_setitem[shape8-index8-value8]", "sparse/tests/test_dok.py::test_setitem[shape10-index10-value10]", "sparse/tests/test_dok.py::test_setitem[shape12-index12-value12]", "sparse/tests/test_dok.py::test_default_dtype", "sparse/tests/test_dok.py::test_int_dtype", "sparse/tests/test_dok.py::test_float_dtype", "sparse/tests/test_dok.py::test_set_zero", "sparse/tests/test_dok.py::test_asformat[coo]", "sparse/tests/test_dok.py::test_asformat[dok]", "sparse/tests/test_io.py::test_save_load_npz_file[True]", "sparse/tests/test_io.py::test_save_load_npz_file[False]", "sparse/tests/test_io.py::test_load_wrong_format_exception" ]
[]
BSD 3-Clause "New" or "Revised" License
2,833
[ "sparse/utils.py", "docs/generated/sparse.COO.rst", "sparse/coo/indexing.py", "sparse/slicing.py", "docs/generated/sparse.COO.all.rst", "docs/generated/sparse.COO.any.rst", "docs/operations.rst", "sparse/coo/core.py", "requirements.txt" ]
[ "sparse/utils.py", "docs/generated/sparse.COO.rst", "sparse/coo/indexing.py", "sparse/slicing.py", "docs/generated/sparse.COO.all.rst", "docs/generated/sparse.COO.any.rst", "docs/operations.rst", "sparse/coo/core.py", "requirements.txt" ]
zopefoundation__zope.formlib-16
0813545c8c2153cff0577679fec6bb8f00332a8d
2018-07-26 21:15:11
0813545c8c2153cff0577679fec6bb8f00332a8d
diff --git a/.gitignore b/.gitignore index 96407f9..325ced7 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,6 @@ develop-eggs/ eggs/ parts/ coverage/ +.coverage +htmlcov/ +docs/_build/ diff --git a/CHANGES.rst b/CHANGES.rst index d50a970..6b42179 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,14 +1,15 @@ -Changes -======= +========= + Changes +========= -4.5 (unreleased) -================ +4.5.0 (unreleased) +================== -- Nothing changed yet. +- Move documentation to https://zopeformlib.readthedocs.io -4.4 (2017-08-15) -================ +4.4.0 (2017-08-15) +================== - Add support for Python 3.5, and 3.6. diff --git a/MANIFEST.in b/MANIFEST.in index 45019b2..470140c 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -6,6 +6,10 @@ include tox.ini include .travis.yml include .coveragerc +recursive-include docs *.py +recursive-include docs *.rst +recursive-include docs Makefile + recursive-include src * global-exclude *.pyc diff --git a/README.rst b/README.rst index 8a4ff99..8f052d2 100644 --- a/README.rst +++ b/README.rst @@ -1,9 +1,17 @@ -``zope.formlib`` -================ +============== + zope.formlib +============== .. image:: https://travis-ci.org/zopefoundation/zope.formlib.svg?branch=master :target: https://travis-ci.org/zopefoundation/zope.formlib +.. image:: https://readthedocs.org/projects/zopeformlib/badge/?version=latest + :target: https://zopeformlib.readthedocs.io/en/latest/ + :alt: Documentation Status + + Forms are web components that use widgets to display and input data. Typically a template displays the widgets by accessing an attribute or method on an underlying class. + +Documentation is hosted at https://zopeformlib.readthedocs.io/en/latest/ diff --git a/docs/api/index.rst b/docs/api/index.rst new file mode 100644 index 0000000..d746395 --- /dev/null +++ b/docs/api/index.rst @@ -0,0 +1,78 @@ +=============== + API Reference +=============== + +zope.formlib.interfaces +======================= + +.. 
automodule:: zope.formlib.interfaces + +zope.formlib.boolwidgets +======================== + +.. automodule:: zope.formlib.boolwidgets + +zope.formlib.errors +=================== + +.. automodule:: zope.formlib.errors + +zope.formlib.exception +====================== + +.. automodule:: zope.formlib.exception + +zope.formlib.form +================= + +.. automodule:: zope.formlib.form + +zope.formlib.i18n +================= + +.. automodule:: zope.formlib.i18n + +zope.formlib.itemswidgets +========================= + +.. automodule:: zope.formlib.itemswidgets + +zope.formlib.namedtemplate +========================== + +.. automodule:: zope.formlib.namedtemplate + +zope.formlib.objectwidget +========================= + +.. automodule:: zope.formlib.objectwidget + +zope.formlib.sequencewidget +=========================== + +.. automodule:: zope.formlib.sequencewidget + +zope.formlib.source +=================== + +.. automodule:: zope.formlib.source + +zope.formlib.textwidgets +======================== + +.. automodule:: zope.formlib.textwidgets + +zope.formlib.utility +==================== + +.. automodule:: zope.formlib.utility + +zope.formlib.widget +=================== + +.. automodule:: zope.formlib.widget + +zope.formlib.widgets +==================== + +.. automodule:: zope.formlib.widgets diff --git a/docs/changelog.rst b/docs/changelog.rst new file mode 100644 index 0000000..d9e113e --- /dev/null +++ b/docs/changelog.rst @@ -0,0 +1,1 @@ +.. include:: ../CHANGES.rst diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..e1ba4b5 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,200 @@ +# -*- coding: utf-8 -*- +# +# zope.formlib documentation build configuration file, created by +# sphinx-quickstart on Thu Jun 8 07:02:25 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. 
+# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) +import os +import sys +import pkg_resources +# Use the python versions instead of the cython compiled versions +# for better documentation extraction and ease of tweaking docs. +os.environ['PURE_PYTHON'] = '1' + +sys.path.append(os.path.abspath('../src')) +rqmt = pkg_resources.require('zope.formlib')[0] + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', + 'sphinx.ext.viewcode', + 'repoze.sphinx.autointerface', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'zope.formlib' +copyright = u'2017, Zope Foundation contributors' +author = u'Zope Foundation contributors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. 
+version = '%s.%s' % tuple(map(int, rqmt.version.split('.')[:2])) +# The full version, including alpha/beta/rc tags. +release = rqmt.version + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The reST default role (used for this markup: `text`) to use for all documents. +default_role = 'obj' + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'perldoc' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['_static'] + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'zopeformlibdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'zopeformlib.tex', u'zope.formlib Documentation', + u'Zopefoundation', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'zopeformlib', u'zope.formlib Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'zopeformlib', u'zope.formlib Documentation', + author, 'zopeformlib', 'One line description of project.', + 'Miscellaneous'), +] + + + + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = { + 'https://docs.python.org/': None, + 'https://zopebrowser.readthedocs.io/en/latest': None, + 'https://zopecomponent.readthedocs.io/en/latest': None, + 'https://zopeinterface.readthedocs.io/en/latest': None, + 'https://zopepublisher.readthedocs.io/en/latest': None, + 'https://zopeschema.readthedocs.io/en/latest': None, +} + +extlinks = { + 'issue': ('https://github.com/zopefoundation/zope.formlib/issues/%s', + 'issue #'), + 'pr': ('https://github.com/zopefoundation/zope.formlib/pull/%s', + 'pull request #')} + +autodoc_default_flags = ['members', 'show-inheritance'] +autoclass_content = 'both' +# This causes the order in __all__ to be ignored :( +# autodoc_member_order = 'bysource' diff --git a/docs/errors.rst b/docs/errors.rst new file mode 100644 index 0000000..f14f462 --- /dev/null +++ b/docs/errors.rst @@ -0,0 +1,1 @@ +.. include:: ../src/zope/formlib/errors.rst diff --git a/docs/form.rst b/docs/form.rst new file mode 100644 index 0000000..4006578 --- /dev/null +++ b/docs/form.rst @@ -0,0 +1,1 @@ +.. include:: ../src/zope/formlib/form.rst diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..5f42944 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,34 @@ + +.. include:: ../README.rst + + +Introduction and Basics +======================= + +.. toctree:: + :maxdepth: 2 + + form + widgets + errors + objectwidget + source + changelog + + +API Details +=========== + +.. toctree:: + :maxdepth: 2 + + api/index + + +==================== + Indices and tables +==================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/objectwidget.rst b/docs/objectwidget.rst new file mode 100644 index 0000000..4cbbb77 --- /dev/null +++ b/docs/objectwidget.rst @@ -0,0 +1,1 @@ +.. include:: ../src/zope/formlib/objectwidget.rst diff --git a/docs/source.rst b/docs/source.rst new file mode 100644 index 0000000..f8ccceb --- /dev/null +++ b/docs/source.rst @@ -0,0 +1,1 @@ +.. 
include:: ../src/zope/formlib/source.rst diff --git a/docs/widgets.rst b/docs/widgets.rst new file mode 100644 index 0000000..b6c2a51 --- /dev/null +++ b/docs/widgets.rst @@ -0,0 +1,1 @@ +.. include:: ../src/zope/formlib/widgets.rst diff --git a/rtd-requirements.txt b/rtd-requirements.txt new file mode 100644 index 0000000..e9704b8 --- /dev/null +++ b/rtd-requirements.txt @@ -0,0 +1,1 @@ +.[docs] diff --git a/setup.py b/setup.py index 4a86845..d0ae4de 100644 --- a/setup.py +++ b/setup.py @@ -45,16 +45,11 @@ setup(name='zope.formlib', author='Zope Foundation and Contributors', author_email='[email protected]', description='Form generation and validation library for Zope', - long_description=(read('README.rst') - + '\n\n' + - read('src', 'zope', 'formlib', 'form.txt') - + '\n\n' + - read('src', 'zope', 'formlib', 'widgets.txt') - + '\n\n' + - read('src', 'zope', 'formlib', 'errors.txt') - + '\n\n' + - read('CHANGES.rst') - ), + long_description=( + read('README.rst') + + '\n\n' + + read('CHANGES.rst') + ), license='ZPL 2.1', keywords="zope3 form widget", classifiers=[ @@ -75,40 +70,46 @@ setup(name='zope.formlib', 'Operating System :: OS Independent', 'Topic :: Internet :: WWW/HTTP', 'Framework :: Zope3', - ], + ], url='https://github.com/zopefoundation/zope.formlib', packages=find_packages('src'), - package_dir = {'': 'src'}, + package_dir={'': 'src'}, namespace_packages=['zope',], - extras_require=dict( - test=['zope.configuration', - 'zope.testing', - 'zope.testrunner', - ] - ), + extras_require={ + 'test': [ + 'zope.configuration', + 'zope.testing', + 'zope.testrunner', + ], + 'docs': [ + 'Sphinx', + 'sphinx_rtd_theme', + 'repoze.sphinx.autointerface', + ], + }, install_requires=[ - 'setuptools', - 'pytz', - 'zope.browser>=1.1', - 'zope.browserpage>=3.11.0', - 'zope.component', - 'zope.event', - 'zope.i18n', - 'zope.i18nmessageid', - 'zope.interface', - 'zope.lifecycleevent', - 'zope.publisher', - 'zope.schema>=3.5.1', - 'zope.security', - 'zope.traversing', - 
'zope.datetime', - ], - tests_require = [ + 'setuptools', + 'pytz', + 'zope.browser>=1.1', + 'zope.browserpage>=3.11.0', + 'zope.component', + 'zope.event', + 'zope.i18n', + 'zope.i18nmessageid', + 'zope.interface', + 'zope.lifecycleevent', + 'zope.publisher', + 'zope.schema>=3.5.1', + 'zope.security', + 'zope.traversing', + 'zope.datetime', + ], + tests_require=[ 'zope.configuration', 'zope.testing', 'zope.testrunner', - ], - test_suite = '__main__.alltests', - include_package_data = True, - zip_safe = False, - ) + ], + test_suite='__main__.alltests', + include_package_data=True, + zip_safe=False, +) diff --git a/src/zope/formlib/TODO.txt b/src/zope/formlib/TODO.rst similarity index 100% rename from src/zope/formlib/TODO.txt rename to src/zope/formlib/TODO.rst diff --git a/src/zope/formlib/bugs.txt b/src/zope/formlib/bugs.rst similarity index 100% rename from src/zope/formlib/bugs.txt rename to src/zope/formlib/bugs.rst diff --git a/src/zope/formlib/errors.txt b/src/zope/formlib/errors.rst similarity index 87% rename from src/zope/formlib/errors.txt rename to src/zope/formlib/errors.rst index 6f60521..b313855 100644 --- a/src/zope/formlib/errors.txt +++ b/src/zope/formlib/errors.rst @@ -1,14 +1,17 @@ -Error handling -============== +================ + Error handling +================ + +.. currentmodule:: zope.formlib.interfaces These are a couple of functional tests that were written on-the-go ... In the future this might become more extensive ... Displaying invalidation errors ------------------------------- +============================== Validation errors, e.g. 
cause by invariants, are converted into readable text -by adapting them to IWidgetInputErrorView: +by adapting them to `IWidgetInputErrorView`: >>> from zope.publisher.browser import TestRequest >>> from zope.interface.exceptions import Invalid @@ -20,7 +23,7 @@ by adapting them to IWidgetInputErrorView: >>> message u'<span class="error">You are wrong!</span>' -Interface invariant methods raise zope.interface.Invalid exception. Test if +Interface invariant methods raise `zope.interface.Invalid` exception. Test if this exception gets handled by the error_views. >>> myError = Invalid('My error message') @@ -54,9 +57,9 @@ And yes, we can even handle an i18n message in an Invalid exception: u'<span class="error">[[my.domain][My i18n error message]]</span>' Displaying widget input errors ------------------------------- +============================== -WidgetInputError exceptions also work with i18n messages: +`WidgetInputError` exceptions also work with i18n messages: >>> from zope.formlib.interfaces import WidgetInputError >>> myError = WidgetInputError( diff --git a/src/zope/formlib/form.py b/src/zope/formlib/form.py index 0d30b80..b6196ef 100644 --- a/src/zope/formlib/form.py +++ b/src/zope/formlib/form.py @@ -10,7 +10,9 @@ # FOR A PARTICULAR PURPOSE. # ############################################################################## -"""Forms +"""Forms. + +This module provides the `zope.formlib.interfaces.IFormAPI` interface. """ import binascii import datetime @@ -67,7 +69,8 @@ def expandPrefix(prefix): return prefix @interface.implementer(interfaces.IFormField) -class FormField: +class FormField(object): + """Implementation of `zope.formlib.interfaces.IFormField`. 
""" def __init__(self, field, name=None, prefix='', for_display=None, for_input=None, custom_widget=None, @@ -97,6 +100,7 @@ def _initkw(keep_readonly=(), omit_readonly=False, **defaults): @interface.implementer(interfaces.IFormFields) class FormFields(object): + """Implementation of `zope.formlib.interfaces.IFormFields`.""" def __init__(self, *args, **kw): keep_readonly, omit_readonly, defaults = _initkw(**kw) @@ -186,6 +190,7 @@ def fields(*args, **kw): @interface.implementer(interfaces.IWidgets) class Widgets(object): + """Implementation of `zope.formlib.interfaces.IWidgets`.""" def __init__(self, widgets, prefix_length=None, prefix=None): self.__Widgets_widgets_items__ = widgets @@ -239,7 +244,7 @@ def canWrite(context, field): def setUpWidgets(form_fields, form_prefix=None, context=None, request=None, form=None, data=(), adapters=None, ignore_request=False): - + """Sets up widgets.""" if request is None: request = form.request if context is None and form is not None: @@ -340,6 +345,7 @@ def _createWidget(form_field, field, request, iface): def getWidgetsData(widgets, form_prefix, data): + """See `zope.formlib.interfaces.IFormAPI.getWidgetsData`""" errors = [] form_prefix = expandPrefix(form_prefix) @@ -508,6 +514,7 @@ class FormData: def checkInvariants(form_fields, form_data, context): + """See `zope.formlib.interfaces.IFormAPI.checkInvariants`""" # First, collect the data for the various schemas schema_data = {} @@ -561,6 +568,7 @@ def applyData(context, form_fields, data, adapters=None): return descriptions def applyChanges(context, form_fields, data, adapters=None): + """See `zope.formlib.interfaces.IFormAPI.applyChanges`""" return bool(applyData(context, form_fields, data, adapters)) @@ -575,6 +583,7 @@ def _callify(meth): @interface.implementer(interfaces.IAction) class Action(object): + """See `zope.formlib.interfaces.IAction`""" _identifier = re.compile('[A-Za-z][a-zA-Z0-9_]*$') def __init__(self, label, success=None, failure=None, @@ -663,7 +672,8 @@ 
def render_submit_button(self): (self.__name__, self.__name__, escape(label, quote=True)) ) -class action: +class action(object): + """See `zope.formlib.interfaces.IFormAPI.action`""" def __init__(self, label, actions=None, **options): caller_locals = sys._getframe(1).f_locals if actions is None: @@ -716,7 +726,7 @@ class Actions(object): return self.__class__(*[a.__get__(inst) for a in self.actions]) def handleSubmit(actions, data, default_validate=None): - + """Handle a submit.""" for action in actions: if action.submitted(): errors = action.validate(data) diff --git a/src/zope/formlib/form.txt b/src/zope/formlib/form.rst similarity index 96% rename from src/zope/formlib/form.txt rename to src/zope/formlib/form.rst index dce6721..fd2e764 100644 --- a/src/zope/formlib/form.txt +++ b/src/zope/formlib/form.rst @@ -1,5 +1,8 @@ -Forms -===== +======= + Forms +======= + +.. currentmodule:: zope.formlib.form Forms are web components that use widgets to display and input data. Typically a template displays the widgets by accessing an attribute or @@ -35,7 +38,7 @@ schema to the `Fields` constructor: >>> class MyForm: ... form_fields = form.Fields(IOrder) -This sets up a set of form fields from the interface, IOrder. +This sets up a set of form fields from the interface, ``IOrder``. >>> len(MyForm.form_fields) 6 @@ -58,7 +61,7 @@ or by omitting fields: >>> [w.__name__ for w in MyForm.form_fields.omit('now', 'identifier')] ['name', 'min_size', 'max_size', 'color'] -We can omit read-only fields using the omit_readonly option when +We can omit read-only fields using the *omit_readonly* option when setting up the fields: >>> class MyForm: @@ -68,7 +71,7 @@ setting up the fields: Getting HTML ------------- +============ Having defined form fields, we can use them to generate HTML forms. Typically, this is done at run time by form class @@ -86,8 +89,8 @@ instances. Let's look at an example that displays some input widgets: ... ignore_request=ignore_request) ... 
return '\n'.join([w() for w in widgets]) -Here we used ``form.setUpWidgets`` to create widget instances from our -form-field specifications. The second argument to ``setUpWidgets`` is a +Here we used `setUpWidgets` to create widget instances from our +form-field specifications. The second argument to `setUpWidgets` is a form prefix. All of the widgets on this form are given the same prefix. This allows multiple forms to be used within a single form tag, assuming that each form uses a different form prefix. @@ -124,8 +127,8 @@ output: Sometimes we don't want this behavior: we want to ignore the request values, particularly after a form has been processed and before it is drawn again. -This can be accomplished with the 'ignore_request' argument in -setUpWidgets. +This can be accomplished with the *ignore_request* argument in +`setUpWidgets`. >>> print(MyForm(None, request)(ignore_request=True)) ... # doctest: +NORMALIZE_WHITESPACE @@ -140,10 +143,10 @@ setUpWidgets. Reading data ------------- +============ Of course, we don't just want to display inputs. We want to get the -input data. We can use getWidgetsData for that: +input data. We can use `getWidgetsData` for that: >>> from pprint import pprint >>> class MyForm: @@ -250,7 +253,7 @@ If we provide valid data, we'll get the data back: It's up to the form to decide what to do with the information. Invariants ----------- +========== The `getWidgetsData` function checks individual field constraints. Interfaces can also provide invariants that we may also want to check. @@ -271,7 +274,7 @@ greater than or equal to the minimum: ... if order.max_size < order.min_size: ... raise interface.Invalid("Maximum is less than Minimum") -We can update our form to check the invariant using 'checkInvariants': +We can update our form to check the invariant using `checkInvariants`: >>> class MyForm: ... 
form_fields = form.Fields(IOrder, omit_readonly=True) @@ -383,7 +386,7 @@ invariants are ignored: Edit Forms ----------- +========== A common application of forms is edit forms. Edit forms are special in 2 ways: @@ -395,9 +398,9 @@ in 2 ways: object being edited. The form package provides some functions to assist with creating edit -forms. When we set up our form_fields, we use the `render_context` -option, which uses data from the context passed to setUpWidgets. -Let's create a content class that provides `IOrder` and a simple form +forms. When we set up our form_fields, we use the *render_context* +option, which uses data from the context passed to `setUpWidgets`. +Let's create a content class that provides ``IOrder`` and a simple form that uses it: >>> import datetime @@ -438,7 +441,7 @@ that uses it: Note that, in this case, we got the values from the request, because we used an old request. If we want to redraw the form after processing a -request, it is safest to pass ignore_request = True to setUpWidgets so that +request, it is safest to pass ``ignore_request = True`` to `setUpWidgets` so that the form is redrawn with the values as found in the object, not on the request. >>> print(MyForm(order, request)(ignore_request=True)) @@ -582,11 +585,11 @@ changes were applied: because the new and old values are the same. -The code we included in `MyForm` above is generic: it applies to any +The code we included in ``MyForm`` above is generic: it applies to any edit form. Actions -------- +======= Our commit logic is a little complicated. It would be far more complicated if there were multiple submit buttons. @@ -752,9 +755,9 @@ Lets walk through the `__call__` method. - We set up our widgets as before. -- We use `form.handleSubmit` to validate our data. We pass the form, - actions, prefix, and `validate` method. For each action, - `form.handleSubmit` checks to see if the action was submitted. If the +- We use `handleSubmit` to validate our data. 
We pass the form, + actions, prefix, and ``validate`` method. For each action, + `handleSubmit` checks to see if the action was submitted. If the action was submitted, it checks to see if it has a validator. If the action has a validator, the action's validator is called, otherwise the validator passed is called. The validator result (a @@ -869,13 +872,13 @@ Ah, much better. And our order has been updated: 1.0 Helpful base classes --------------------- +==================== Our form has a lot of repetitive code. A number of helpful base classes provide standard form implementation. Form -~~~~ +---- The `Form` base class provides a number of common attribute definitions. It provides: @@ -953,16 +956,16 @@ We inherited most of our behavior from the base class. We also used the `action` decorator. The action decorator: -- creates an `actions` variable if one isn't already created, +- creates an ``actions`` variable if one isn't already created, - defines an action with the given label and any other arguments, and -- appends the action to the `actions` list. +- appends the action to the ``actions`` list. The `action` decorator accepts the same arguments as the `Action` -class with the exception of the `success` option. +class with the exception of the *success* option. -The creation of the `actions` is a bit magic, but provides +The creation of the ``actions`` is a bit magic, but provides simplification in common cases. Now we can try out our form: @@ -1013,7 +1016,7 @@ Now we can try out our form: 20.0 EditForm -~~~~~~~~ +-------- Our `handle_edit_action` action is common to edit forms. An `EditForm` base class captures this commonality. It also sets up @@ -1076,11 +1079,11 @@ Note that `EditForm` shows the date and time when content are modified. Multiple Schemas and Adapters ------------------------------ +============================= Forms can use fields from multiple schemas. This can be done in a number of ways. 
For example, multiple schemas can be passed to -`form.Fields`: +`Fields`: >>> class IDescriptive(interface.Interface): ... title = schema.TextLine(title=u"Title") @@ -1202,11 +1205,11 @@ name: If you aren't using `EditForm`, you can get a dictionary populated in the same way by `setUpWidgets` by passing the dictionary as an -`adapters` keyword argument. +*adapters* keyword argument. Named Widget Access -------------------- +=================== The value returned from `setUpWidgets` supports named-based lookup as well as iteration: @@ -1224,18 +1227,18 @@ iteration: 'form.title' Form-field manipulations ------------------------- +======================== The form-field constructor is very flexible. We've already seen that we can supply multiple schemas. Here are some other things you can do. Specifying individual fields -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +---------------------------- You can specify individual fields for a form. Here, we'll create a -form that collects just the name from `IOrder` and the title from -`IDescriptive`: +form that collects just the name from ``IOrder`` and the title from +``IDescriptive``: >>> class MyForm(form.EditForm): ... form_fields = form.Fields(IOrder['name'], @@ -1266,7 +1269,7 @@ You can also use stand-alone fields: But make sure the fields have a '__name__', as was done above. Concatenating field collections -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------- It is sometimes convenient to combine multiple field collections. Field collections support concatenation. For example, we may want to @@ -1290,11 +1293,11 @@ combine field definitions: size="20" type="text" value="" /> Using fields for display -~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------ Normally, any writable fields get input widgets. We may want to indicate that some fields should be used for display only. 
We can do -this using the `for_display` option when setting up form_fields: +this using the *for_display* option when setting up form_fields: >>> class MyForm(form.EditForm): ... form_fields = ( @@ -1329,7 +1332,7 @@ by `EditForm` has a condition to prevent it's use when there are no input widgets. Check it out for an example of using action conditions. Using fields for input -~~~~~~~~~~~~~~~~~~~~~~ +---------------------- We may want to indicate that some fields should be used for input even if the underlying schema field is read-only. We can do this using the @@ -1354,7 +1357,7 @@ if the underlying schema field is read-only. We can do this using the size="10" type="text" value="20.0" /> Displaying or editing raw data ------------------------------- +============================== Sometimes, you want to display or edit data that doesn't come from an object. One way to do this is to pass the data to setUpWidgets. @@ -1431,7 +1434,7 @@ our form fields: Note that we didn't get data from the request because we are using all display widgets. -Passing `ignore_request=True` to the `setUpWidgets` function ignores +Passing ``ignore_request=True`` to the `setUpWidgets` function ignores the request for all values passed in the data dictionary, in order to help with redrawing a form after a successful action handler. We'll fake that quickly by forcing ignore_request to be `True`. @@ -1461,7 +1464,7 @@ fake that quickly by forcing ignore_request to be `True`. Specifying Custom Widgets -------------------------- +========================= It is possible to use custom widgets for specific fields. This can be done for a variety of reasons, but the provided mechanism should work @@ -1482,7 +1485,7 @@ Let's create a simple custom widget to use in our demonstration:: ... 
return '<span class="iso-datetime">2005-05-04</span>' To set the custom widget factory for a field, assign to the -`custom_widget` attribute of the form field object:: +``custom_widget`` attribute of the form field object:: >>> class MyForm(form.Form): ... actions = () @@ -1497,7 +1500,7 @@ To set the custom widget factory for a field, assign to the <span class="iso-datetime">2005-05-04</span> Specifying Fields individually -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------ All of the previous examples set up fields as collections. We can also set up forms individually and pass them to the Fields @@ -1516,13 +1519,13 @@ written more simply as: <span class="iso-datetime">2005-05-04</span> Computing default values -~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------ We saw earlier that we could provide initial widget data by passing a dictionary to setUpWidgets. We can also supply a function or method name when we set up form fields. -We might like to include the `now` field in our forms. We can provide +We might like to include the ``now`` field in our forms. We can provide a function for getting the needed initial value: >>> import datetime @@ -1585,12 +1588,12 @@ Note that the function passed must take a form as an argument. The form base classes always pass the form to `setUpWidgets`. Advanced Usage Hints --------------------- +==================== This section documents patterns for advanced usage of the formlib package. Multiple button groups -~~~~~~~~~~~~~~~~~~~~~~ +---------------------- Multiple button groups can be accomplished many ways, but the way we've found that reuses the most code is the following: @@ -1614,22 +1617,24 @@ that reuses the most code is the following: ... 
The template then can render the button groups separately--something like the -following, for instance: +following, for instance:: <input tal:repeat="action view/primary_actions" tal:replace="structure action/render" /> -and +and:: <input tal:repeat="action view/secondary_actions" tal:replace="structure action/render" /> -But the form machinery can still find the correct button. # TODO: demo +But the form machinery can still find the correct button. + +.. # TODO: demo the above Dividing display of widget errors and invariant errors -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------------ Even though the form machinery only has a single errors attribute, if designers wish to render widget errors differently than invariant errors, they can be @@ -1638,11 +1643,11 @@ all widget errors should implement zope.formlib.interfaces.IWidgetInputError, and invariant errors shouldn't, because they don't come from a widget. Therefore, a simple division such as the following should suffice. -# TODO +.. # TODO Omitting the form prefix -~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------ For certain use cases (e.g. forms that post data to a different server whose software you do not control) it is important to be able to generate forms @@ -1741,11 +1746,11 @@ It is also possible to keep the form prefix and just suppress the 'actions' pref value="Button" class="button" /> Additional Cases ----------------- +================ Automatic Context Adaptation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +---------------------------- As you may know already, the formlib will automatically adapt the context to find a widget and data for a particular field. In an early version of @@ -1808,7 +1813,7 @@ Here are some more places where the behavior was incorrect: Event descriptions -~~~~~~~~~~~~~~~~~~ +------------------ The ObjectModifiedEvent can be annotated with descriptions about the involved schemas and fields. 
The formlib provides these annotations with the help of the @@ -1841,7 +1846,7 @@ Cleanup: >>> zope.event.subscribers.remove(eventLog) Actions that cause a redirect -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------- When an action causes a redirect, the following `render` phase is omitted as the result will not be displayed anyway. This is both a performance @@ -1876,7 +1881,7 @@ information. render was called Prevent form submit for GET requests -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------ It can be useful to only accept form submits over POST requests. This, for example, prevents replaying data-modifying actions when reloading a page in a @@ -1975,14 +1980,15 @@ methods are accepted:: Action: handle {'title': 'Submitted Title'} Prevent Cross-site Request Forgery (CSRF) attacks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -See also: http://en.wikipedia.org/wiki/Cross-site_request_forgery. +------------------------------------------------- -The CSRF protection in zope.formlib assumes the attacker cannot get hold of -information stored in a cookie that is send to the domain handling the form -submit. zope.formlib verifies that the token as sent with the cookie is -identical to the value as sent with the form (as a hidden input field). +The `cross-site request forgery +<https://en.wikipedia.org/wiki/Cross-site_request_forgery.>`_ +protection in zope.formlib assumes the attacker cannot get hold of +information stored in a cookie that is send to the domain handling the +form submit. zope.formlib verifies that the token as sent with the +cookie is identical to the value as sent with the form (as a hidden +input field). zope.formlib will set a random token in the cookie when first accessing the form. Any subsequent form rendering and submit handling will use the token @@ -2008,8 +2014,8 @@ Issues to research: * Tests for applications that use form components with CSRF protection enabled, is cumbersome. 
Can we help that somehow? -* Is using os.urandom() for generating a token sufficient *and* - available cross-platform? Could uuid.uuid4() be an alternative? +* Is using `os.urandom` for generating a token sufficient *and* + available cross-platform? Could `uuid.uuid4` be an alternative? When first visting a form, a CSRF token will be set in the cookie:: @@ -2180,7 +2186,7 @@ We can indeed submit data to the forms:: >>> _ = multi() Action: handle in Form Two -There is a view for the InvalidCSRFTokenError:: +There is a view for the `.InvalidCSRFTokenError`:: >>> from zope.component import getMultiAdapter >>> from zope.formlib.interfaces import InvalidCSRFTokenError diff --git a/src/zope/formlib/interfaces.py b/src/zope/formlib/interfaces.py index 2d829ed..78d40ab 100644 --- a/src/zope/formlib/interfaces.py +++ b/src/zope/formlib/interfaces.py @@ -382,7 +382,7 @@ class ISubPage(Interface): class IFormAPI(Interface): - """API to facilitate creating forms, provided by zope.formlib.form + """API to facilitate creating forms, provided by `zope.formlib.form` """ def Field(schema_field, **options): @@ -420,7 +420,7 @@ class IFormAPI(Interface): def Fields(*arguments, **options): - """Create form-fields collection (IFormFields) + """Create form-fields collection (`IFormFields`) Creates a form-field collection from a collection of: @@ -651,13 +651,11 @@ class IFormAPI(Interface): that the options don't include the success option. The function is designed to be used as a decorator (Python 2.4 - and later), as in: - - @action("Edit") - def handle_edit(self, action, data): - ... - + and later), as in:: + @action("Edit") + def handle_edit(self, action, data): + ... 
""" def validate(form, actions, form_prefix, data, default_validate=None): @@ -779,7 +777,7 @@ class IFormBaseCustomization(ISubPage, IBrowserPage): class IFormFields(Interface): - """A colection of form fields (IFormField objects) + """A colection of form fields (`IFormField` objects) """ def __len__(): @@ -811,7 +809,7 @@ class IFormFields(Interface): """ def __add__(form_fields): - """Add two form fields collections (IFormFields) + """Add two form fields collections (`IFormFields`) Return a new IFormFields that is the concatination of the two IFormFields. @@ -820,8 +818,8 @@ class IFormFields(Interface): def select(*names): """Select fields with given names in order - Return a new IFormFields that is a selection from the original - IFormFields that has the named fields in the specified order. + Return a new `IFormFields` that is a selection from the original + `IFormFields` that has the named fields in the specified order. """ def omit(*names): diff --git a/src/zope/formlib/objectwidget.py b/src/zope/formlib/objectwidget.py index b49b0df..98c9f27 100644 --- a/src/zope/formlib/objectwidget.py +++ b/src/zope/formlib/objectwidget.py @@ -147,6 +147,7 @@ class ObjectWidget(BrowserWidget, InputWidget): def applyChanges(self, content): + """See `zope.formlib.interfaces.IInputWidget.applyChanges`""" field = self.context # create our new object value diff --git a/src/zope/formlib/objectwidget.txt b/src/zope/formlib/objectwidget.rst similarity index 80% rename from src/zope/formlib/objectwidget.txt rename to src/zope/formlib/objectwidget.rst index 510d400..0b0cc8e 100644 --- a/src/zope/formlib/objectwidget.txt +++ b/src/zope/formlib/objectwidget.rst @@ -2,7 +2,7 @@ Object Widget ============= -The following example shows a Family with Mother and Father. +The following example shows a ``Family`` with ``Mother`` and ``Father``. 
First define the interface for a person: >>> from zope.interface import Interface, implementer @@ -35,8 +35,9 @@ Let's define the interface family: ... required=False, ... schema=IPerson) -Let's define the class family with FieldProperty's mother and father -FieldProperty validate the values if they get added: +Let's define the class ``Family`` using +`zope.schema.fieldproperty.FieldProperty` for ``mother`` and ``father``. +``FieldProperty`` instances validate the values if they get added: >>> from zope.schema.fieldproperty import FieldProperty @@ -50,7 +51,7 @@ FieldProperty validate the values if they get added: ... self.mother = mother ... self.father = father -Let's make an instance of Family with None attributes: +Let's make an instance of Family with `None` attributes: >>> family = Family() >>> bool(family.mother == None) @@ -77,7 +78,7 @@ Let's define a dummy class which doesn't implements IPerson: ... def __init__(self, name=''): ... self.name = name -Raise a SchemaNotProvided exception if we add a Dummy instance to a Family +Raise a `zope.schema.interfaces.SchemaNotProvided` exception if we add a Dummy instance to a Family object: >>> foo = Dummy('foo') @@ -97,7 +98,7 @@ Now let's setup a enviroment for use the widget like in a real application: >>> from zope.formlib.widgets import ObjectWidget >>> from zope.formlib.interfaces import IInputWidget -Register the TextLine widget used in the IPerson interface for the field 'name'. +Register the `zope.schema.TextLine` widget used in the IPerson interface for the field 'name'. 
>>> from zope.publisher.interfaces.browser import IDefaultBrowserLayer >>> from zope.component import provideAdapter @@ -116,22 +117,21 @@ instance on the family object: >>> family.mother.name u'Margrith' -Now let's initialize a ObjectWidget with the right attributes: +Now let's initialize a `.ObjectWidget` with the right attributes: >>> mother_field = IFamily['mother'] >>> factory = Person >>> widget = ObjectWidget(mother_field, request, factory) -Now comes the magic. Apply changes means we force the ObjectWidget to read -the request, extract the value and save it on the content. The ObjectWidget +Now comes the magic. Apply changes means we force the `.ObjectWidget` to read +the request, extract the value and save it on the content. The `.ObjectWidget` instance uses a real Person class (factory) for add the value. The value is -temporary stored in this factory class. The ObjectWidget reads the value from +temporary stored in this factory class. The `.ObjectWidget` reads the value from this factory and set it to the attribute 'name' of the instance mother (The object mother is already there). If we don't have an instance mother already stored in the family object, the factory instance will be stored directly to the family attribute mother. For more information see the method -'applyChanges()' in the interface -zope.formlib.objectwidget.ObjectWidget. +`zope.formlib.objectwidget.ObjectWidget.applyChanges`. >>> widget.applyChanges(family) True @@ -145,4 +145,4 @@ Test the updated mother's name value on the object family: True So, now you know my mothers and fathers name. I hope it's also clear how to -use the Object field and the ObjectWidget. +use the `zope.schema.Object` field and the `.ObjectWidget`. 
diff --git a/src/zope/formlib/source.txt b/src/zope/formlib/source.rst similarity index 95% rename from src/zope/formlib/source.txt rename to src/zope/formlib/source.rst index 012a646..4b25e10 100644 --- a/src/zope/formlib/source.txt +++ b/src/zope/formlib/source.rst @@ -3,7 +3,7 @@ Source Widgets ============== Sources are objects that represent sets of values from which one might choose -and are used with Choice schema fields. Source widgets currently fall into two +and are used with `zope.schema.Choice` schema fields. Source widgets currently fall into two categories: - widgets for iterable sources @@ -23,7 +23,7 @@ All of our examples will be using the component architecture:: >>> import zope.component >>> import zope.schema -This `ITerms` implementation can be used for the sources involved in +This ``ITerms`` implementation can be used for the sources involved in our tests:: >>> import base64 @@ -67,10 +67,11 @@ We'll also need request objects:: Iterable Source Widgets ======================= -Iterable sources are expected to be simpler than queriable sources, so they -represent a good place to start. The most important aspect of iterable sources -for widgets is that it's actually possible to enumerate all the values from the -source. This allows each possible value to be listed in a <select> form field. +Iterable sources are expected to be simpler than queriable sources, so +they represent a good place to start. The most important aspect of +iterable sources for widgets is that it's actually possible to +enumerate all the values from the source. This allows each possible +value to be listed in a ``<select>`` form field. Let's start with a simple example. We have a very trivial source, which is basically a list:: @@ -79,7 +80,7 @@ which is basically a list:: ... class SourceList(list): ... pass -We need to register our `ITerms` view:: +We need to register our ``ITerms`` view:: >>> zope.component.provideAdapter( ... 
ListTerms, @@ -166,7 +167,7 @@ selected:: <input name="field.dog-empty-marker" type="hidden" value="1" /> </div> -Dropdown widgets are achieved with SourceDropdownWidget, which simply +Dropdown widgets are achieved with `.SourceDropdownWidget`, which simply generates a selection list of size 1:: >>> request = TestRequest() @@ -178,8 +179,8 @@ generates a selection list of size 1:: <select id="field.dog" name="field.dog" size="1" > <option selected="selected" value="">(nothing selected)</option>... -An alternative to SourceSelectWidget for small numbers of items is -SourceRadioWidget that provides a radio button group for the items:: +An alternative to `.SourceSelectWidget` for small numbers of items is +`.SourceRadioWidget` that provides a radio button group for the items:: >>> request = TestRequest() >>> widget = zope.formlib.source.SourceRadioWidget( @@ -227,13 +228,13 @@ We'll select an item by setting the appropriate fields in the request:: </div> For list-valued fields with items chosen from iterable sources, there are the -SourceMultiSelectWidget and SourceOrderedMultiSelectWidget widgets. The latter +`.SourceMultiSelectWidget` and `.SourceOrderedMultiSelectWidget` widgets. The latter widget includes support for re-ording the list items. -SourceOrderedMultiSelectWidget is configured as the default widget for lists of +`.SourceOrderedMultiSelectWidget` is configured as the default widget for lists of choices. If you don't need ordering support through the web UI, then you can use -the simpler SourceMultiSelectWidget:: +the simpler `.SourceMultiSelectWidget`:: >>> dogSource = SourceList([ ... 
u'spot', u'bowser', u'prince', u'duchess', u'lassie']) @@ -302,7 +303,7 @@ Finally, what does the widget look like now:: </div> -An alternative for small numbers of items is to use SourceMultiCheckBoxWidget:: +An alternative for small numbers of items is to use `.SourceMultiCheckBoxWidget`:: >>> request = TestRequest() >>> widget = zope.formlib.source.SourceMultiCheckBoxWidget( @@ -377,7 +378,7 @@ Finally, what does the widget look like now:: </div> -For list ordering support, use SourceOrderedMultiSelectWidget:: +For list ordering support, use `.SourceOrderedMultiSelectWidget`:: >>> request = TestRequest() >>> widget = zope.formlib.source.SourceOrderedMultiSelectWidget( @@ -427,7 +428,7 @@ Select two items:: ['spot', 'lassie'] -For set-valued fields, use SourceMultiSelectSetWidget:: +For set-valued fields, use `.SourceMultiSelectSetWidget`:: >>> dogSet = zope.schema.Set( ... __name__ = 'dogSet', @@ -509,7 +510,7 @@ The default widgets for selecting values from sources use the following approach: - One or more query objects are obtained from the source by adapting the source - to `zope.schema.ISourceQueriables`. If no adapter is obtained, then the + to `zope.schema.interfaces.ISourceQueriables`. If no adapter is obtained, then the source itself is assumed to be queriable. - For each queriable found, a @@ -524,13 +525,13 @@ which is basically a list: ... class SourceList(list): ... pass -We need to register our `ITerms` view:: +We need to register our ``ITerms`` view:: >>> zope.component.provideAdapter( ... ListTerms, ... (SourceList, zope.publisher.interfaces.browser.IBrowserRequest)) -We aren't going to provide an adapter to `ISourceQueriables`, so the source +We aren't going to provide an adapter to ``ISourceQueriables``, so the source itself will be used as it's own queriable. 
We need to provide a query view for the source:: @@ -611,8 +612,10 @@ in the form and by "selecting" the submit button:: >>> request.form['field.dog.query.string'] = u'o' >>> request.form['field.dog.query'] = u'Search' -Because the field is required, a non-selection is not valid. Thus, while the -widget still hasInput, it will raise an error when you getInputValue:: +Because the field is required, a non-selection is not valid. Thus, +while the widget still +`~zope.formlib.interfaces.IInputWidget.hasInput`, it will raise an +error when you `~zope.formlib.interfaces.IInputWidget.getInputValue`:: >>> widget.hasInput() True @@ -727,7 +730,7 @@ combines multiple sources:: ... def getQueriables(self): ... return self.sources -This multi-source implements `ISourceQueriables`. It assumes that the sources +This multi-source implements ``ISourceQueriables``. It assumes that the sources it's given are queriable and just returns the sources as the queryable objects. We can reuse our terms view:: @@ -1059,7 +1062,7 @@ Using vocabulary-dependent widgets with sources if you have a widget that uses old-style vocabularies but don't have the time to rewrite it for sources, all is not lost! The wrapper -IterableSourceVocabulary can be used to make sources and ITerms look like a +`.IterableSourceVocabulary` can be used to make sources and ``ITerms`` look like a vocabulary. This allows us to use vocabulary-based widgets with sources instead of vocabularies. diff --git a/src/zope/formlib/widgets.txt b/src/zope/formlib/widgets.rst similarity index 74% rename from src/zope/formlib/widgets.txt rename to src/zope/formlib/widgets.rst index 34cec56..486ebfb 100644 --- a/src/zope/formlib/widgets.txt +++ b/src/zope/formlib/widgets.rst @@ -1,11 +1,16 @@ -Browser Widgets -=============== +================= + Browser Widgets +================= + +.. currentmodule:: zope.formlib.interfaces Formlib defines widgets: views on bound schema fields. Many of these -are straightforward. 
For instance, see the `TextWidget` in -textwidgets.py, which is a subclass of BrowserWidget in widget.py. It -is registered as an `IBrowserRequest` view of an `ITextLine` schema -field, providing the `IInputWidget` interface:: +are straightforward. For instance, see the `.TextWidget` in +textwidgets.py, which is a subclass of `.BrowserWidget` in widget.py. +It is registered as an +`zope.publisher.interfaces.browser.IBrowserRequest` view of an +`zope.schema.interfaces.ITextLine` schema field, providing the +`IInputWidget` interface:: <view type="zope.publisher.interfaces.browser.IBrowserRequest" @@ -16,66 +21,68 @@ field, providing the `IInputWidget` interface:: /> The widget then receives the field and the request as arguments to the factory -(i.e., the `TextWidget` class). +(i.e., the `.TextWidget` class). Some widgets in formlib extend this pattern. The widget registration -is extended for `Choice` fields and for the `collection` fields. +is extended for `zope.schema.Choice` fields and for the ``collection`` fields. Default Choice Field Widget Registration and Lookup ---------------------------------------------------- +=================================================== All field widgets are obtained by looking up a browser `IInputWidget` -or `IDisplayWidget` view for the field object. For `Choice` fields, +or `IDisplayWidget` view for the field object. For `zope.schema.Choice` fields, the default registered widget defers all of its behavior to the result of another lookup: a browser widget view for the field *and* the Choice field's vocabulary. -This allows registration of Choice widgets that differ on the basis of the -vocabulary type. For example, a widget for a vocabulary of images might have -a significantly different user interface than a widget for a vocabulary of -words. 
A dynamic vocabulary might implement `IIterableVocabulary` if its -contents are below a certain length, but not implement the marker "iterable" -interface if the number of possible values is above the threshhold. +This allows registration of Choice widgets that differ on the basis of +the vocabulary type. For example, a widget for a vocabulary of images +might have a significantly different user interface than a widget for +a vocabulary of words. A dynamic vocabulary might implement +`zope.schema.interfaces.IIterableVocabulary` if its contents are below +a certain length, but not implement the marker "iterable" interface if +the number of possible values is above the threshhold. This also means that choice widget factories are called with with an additional argument. Rather than being called with the field and the request as arguments, choice widgets receive the field, vocabulary, and request as arguments. -Some `Choice` widgets may also need to provide a source interface, +Some `zope.schema.Choice` widgets may also need to provide a source interface, particularly if the vocabulary is too big to iterate over. Default Collection Field Widget Registration and Lookup -------------------------------------------------------- +======================================================= The default configured lookup for collection fields -- List, Tuple, and Set, for instance -- begins with the usual lookup for a browser widget view for the field object. This widget defers its display to the result of another lookup: -a browser widget view registered for the field and the field's `value_type` +a browser widget view registered for the field and the field's ``value_type`` (the type of the contained values). This allows registrations for collection widgets that differ on the basis of the members -- a widget for entering a list of text strings might differ significantly from a widget for entering a list of dates...or even a list of choices, as discussed below. 
-This registration pattern has three implications that should be highlighted. +This registration pattern has three implications that should be highlighted. -* First, collection fields that do not specify a `value_type` probably cannot +* First, collection fields that do not specify a ``value_type`` probably cannot have a reasonable widget. * Second, collection widgets that wish to be the default widget for a - collection with any `value_type` should be registered for the collection - field and a generic value_type: the `IField` interface. Do not register the - generic widget for the collection field only or you will break the lookup - behavior as described here. + collection with any ``value_type`` should be registered for the + collection field and a generic value_type: the + `zope.schema.interfaces.IField` interface. Do not register the + generic widget for the collection field only or you will break the + lookup behavior as described here. * Third, like choice widget factories, sequence widget factories (classes or functions) take three arguments. Typical sequence widgets receive the - field, the `value_type`, and the request as arguments. + field, the ``value_type``, and the request as arguments. Collections of Choices -~~~~~~~~~~~~~~~~~~~~~~ +---------------------- -If a collection field's `value_type` is a `Choice` field, the second widget +If a collection field's ``value_type`` is a `zope.schema.Choice` field, the second widget again defers its behavior, this time to a third lookup based on the collection field and the choice's vocabulary. This means that a widget for a list of large image choices can be different than a widget for a list of small image @@ -88,13 +95,13 @@ a "unique" or "lenient" token depending on the field's value, but this is not enabled in the default Zope 3 configuration. 
Registering Widgets for a New Collection Field Type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------------------------------- Because of this lookup pattern, basic widget registrations for new field types must follow a recipe. For example, a developer may introduce a new Bag field type for simple shopping cart functionality and wishes to add widgets for it within the default Zope 3 collection widget registration. The bag widgets -should be registered something like this. +should be registered something like this. The only hard requirement is that the developer must register the bag + choice widget: the widget is just the factory for the third dispatch as described @@ -120,7 +127,7 @@ above, so the developer can use the already implemented widgets listed below:: Beyond this, the developer may also have a generic bag widget she wishes to register. This might look something like this, assuming there's a -`BagSequenceWidget` available in this package:: +``BagSequenceWidget`` available in this package:: <view type="zope.publisher.interfaces.browser.IBrowserRequest" @@ -132,8 +139,8 @@ register. This might look something like this, assuming there's a /> Then any widgets for the bag and a vocabulary would be registered according to -this general pattern, in which `IIterableVocabulary` would be the interface of -any appropriate vocabulary and `BagWidget` is some appropriate widget:: +this general pattern, in which `zope.schema.interfaces.IIterableVocabulary` would be the interface of +any appropriate vocabulary and ``BagWidget`` is some appropriate widget:: <view type="zope.publisher.interfaces.browser.IBrowserRequest" @@ -146,7 +153,7 @@ any appropriate vocabulary and `BagWidget` is some appropriate widget:: Choice widgets and the missing value ------------------------------------- +==================================== Choice widgets for a non-required field include a "no value" item to allow for not selecting any value at all. 
This value used to be omitted for required @@ -162,7 +169,7 @@ Starting with version zope.app.form 3.6.0, dropdown widgets for required fields display a "no value" item even for required fields if an arbitrary value would have to be selected by the widget otherwise. -To switch the old behaviour back on for backwards compatibility, do +To switch the old behaviour back on for backwards compatibility, do:: zope.formlib.itemswidgets.EXPLICIT_EMPTY_SELECTION = False diff --git a/tox.ini b/tox.ini index d405848..ee51bae 100644 --- a/tox.ini +++ b/tox.ini @@ -1,9 +1,15 @@ [tox] envlist = - py27,py34,py35,py36,pypy,pypy3 + py27,py34,py35,py36,pypy,pypy3,docs [testenv] commands = zope-testrunner --test-path=src deps = .[test] + +[testenv:docs] +commands = + sphinx-build -b html -d docs/_build/doctrees docs docs/_build/html +deps = + .[docs]
Move docs to RTD I was trying to cross-reference this project and discovered I couldn't.
zopefoundation/zope.formlib
diff --git a/src/zope/formlib/tests/test_formlib.py b/src/zope/formlib/tests/test_formlib.py index ef4017c..3b1b029 100644 --- a/src/zope/formlib/tests/test_formlib.py +++ b/src/zope/formlib/tests/test_formlib.py @@ -759,7 +759,7 @@ def test_suite(): ]) return unittest.TestSuite(( doctest.DocFileSuite( - '../errors.txt', + '../errors.rst', setUp=formSetUp, tearDown=tearDown, checker=checker, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS, ), @@ -770,7 +770,7 @@ def test_suite(): # optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS, # ), doctest.DocFileSuite( - '../form.txt', + '../form.rst', setUp=formSetUp, tearDown=tearDown, optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS, checker=checker diff --git a/src/zope/formlib/tests/test_objectwidget.py b/src/zope/formlib/tests/test_objectwidget.py index f3031c8..70ef285 100644 --- a/src/zope/formlib/tests/test_objectwidget.py +++ b/src/zope/formlib/tests/test_objectwidget.py @@ -145,7 +145,7 @@ def test_suite(): return unittest.TestSuite(( unittest.makeSuite(ObjectWidgetTest), doctest.DocFileSuite( - '../objectwidget.txt', + '../objectwidget.rst', setUp=testing.setUp, tearDown=testing.tearDown, checker=checker), doctest.DocTestSuite(), diff --git a/src/zope/formlib/tests/test_source.py b/src/zope/formlib/tests/test_source.py index 5a9bdec..05634e9 100644 --- a/src/zope/formlib/tests/test_source.py +++ b/src/zope/formlib/tests/test_source.py @@ -19,10 +19,9 @@ from .support import checker def test_suite(): import doctest return doctest.DocFileSuite( - '../source.txt', + '../source.rst', setUp=testing.setUp, tearDown=testing.tearDown, checker=checker) if __name__ == '__main__': import unittest unittest.main(defaultTest='test_suite') -
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 14 }
4.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work multipart==1.1.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 python-gettext==4.1 pytz==2025.2 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work transaction==3.1.0 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work zope.browser==2.4 zope.browserpage==4.4.0 zope.component==5.1.0 zope.configuration==4.4.1 zope.contenttype==4.6 zope.datetime==4.3.0 zope.deprecation==4.4.0 zope.event==4.6 zope.exceptions==4.6 -e git+https://github.com/zopefoundation/zope.formlib.git@0813545c8c2153cff0577679fec6bb8f00332a8d#egg=zope.formlib zope.hookable==5.4 zope.i18n==4.9.0 zope.i18nmessageid==5.1.1 zope.interface==5.5.2 zope.lifecycleevent==4.4 zope.location==4.3 zope.pagetemplate==4.6.0 zope.proxy==4.6.1 zope.publisher==6.1.0 zope.schema==6.2.1 zope.security==5.8 zope.tal==4.5 zope.tales==5.2 zope.testing==5.0.1 zope.testrunner==5.6 zope.traversing==4.4.1
name: zope.formlib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - multipart==1.1.0 - python-gettext==4.1 - pytz==2025.2 - six==1.17.0 - transaction==3.1.0 - zope-browser==2.4 - zope-browserpage==4.4.0 - zope-component==5.1.0 - zope-configuration==4.4.1 - zope-contenttype==4.6 - zope-datetime==4.3.0 - zope-deprecation==4.4.0 - zope-event==4.6 - zope-exceptions==4.6 - zope-hookable==5.4 - zope-i18n==4.9.0 - zope-i18nmessageid==5.1.1 - zope-interface==5.5.2 - zope-lifecycleevent==4.4 - zope-location==4.3 - zope-pagetemplate==4.6.0 - zope-proxy==4.6.1 - zope-publisher==6.1.0 - zope-schema==6.2.1 - zope-security==5.8 - zope-tal==4.5 - zope-tales==5.2 - zope-testing==5.0.1 - zope-testrunner==5.6 - zope-traversing==4.4.1 prefix: /opt/conda/envs/zope.formlib
[ "src/zope/formlib/tests/test_formlib.py::test_suite", "src/zope/formlib/tests/test_objectwidget.py::test_suite", "src/zope/formlib/tests/test_source.py::test_suite" ]
[]
[ "src/zope/formlib/tests/test_formlib.py::test_error_handling", "src/zope/formlib/tests/test_formlib.py::test_form_template_i18n", "src/zope/formlib/tests/test_formlib.py::test_setUpWidgets_prefix", "src/zope/formlib/tests/test_objectwidget.py::ObjectWidgetTest::test_applyChanges", "src/zope/formlib/tests/test_objectwidget.py::ObjectWidgetTest::test_applyChangesNoChange", "src/zope/formlib/tests/test_objectwidget.py::ObjectWidgetTest::test_error" ]
[]
Zope Public License 2.1
2,834
[ "docs/errors.rst", "MANIFEST.in", "docs/objectwidget.rst", "src/zope/formlib/bugs.txt", ".gitignore", "docs/api/index.rst", "docs/index.rst", "docs/conf.py", "src/zope/formlib/form.py", "src/zope/formlib/form.txt", "src/zope/formlib/source.txt", "tox.ini", "docs/source.rst", "src/zope/formlib/errors.txt", "README.rst", "src/zope/formlib/widgets.txt", "setup.py", "src/zope/formlib/objectwidget.txt", "CHANGES.rst", "src/zope/formlib/TODO.txt", "src/zope/formlib/interfaces.py", "docs/widgets.rst", "rtd-requirements.txt", "docs/form.rst", "docs/changelog.rst", "src/zope/formlib/objectwidget.py" ]
[ "docs/errors.rst", "src/zope/formlib/widgets.rst", "MANIFEST.in", "docs/objectwidget.rst", ".gitignore", "docs/api/index.rst", "docs/index.rst", "docs/conf.py", "src/zope/formlib/TODO.rst", "src/zope/formlib/objectwidget.rst", "src/zope/formlib/form.py", "src/zope/formlib/source.rst", "tox.ini", "docs/source.rst", "README.rst", "src/zope/formlib/errors.rst", "setup.py", "CHANGES.rst", "src/zope/formlib/interfaces.py", "docs/widgets.rst", "src/zope/formlib/bugs.rst", "rtd-requirements.txt", "docs/form.rst", "docs/changelog.rst", "src/zope/formlib/form.rst", "src/zope/formlib/objectwidget.py" ]
sigmavirus24__github3.py-873
6824ebce0059fce75bace08bb4cfb37e2329a2c7
2018-07-27 11:31:15
b8e7aa8eb221cd1eec7a8bc002b75de8098dc77a
jacquerie: > I want to record them by requesting a review on this very PR Ach, I can't because I'm not part of the collaborators on this repo. Is the following an acceptable UX for the error case? ``` In [6]: from github3 import GitHub In [7]: github = GitHub(github_user, github_pass) In [8]: repository = github.repository('sigmavirus24', 'github3.py') In [9]: pull_request = repository.pull_request(873) In [10]: pull_request.create_review_requests(reviewers=['sigmavirus24']) --------------------------------------------------------------------------- UnprocessableEntity Traceback (most recent call last) <ipython-input-10-2125937603d7> in <module>() ----> 1 pull_request.create_review_requests(reviewers=['sigmavirus24']) /home/jacquerie/Code/github3.py/src/github3/decorators.pyc in auth_wrapper(self, *args, **kwargs) 28 def auth_wrapper(self, *args, **kwargs): 29 if hasattr(self, 'session') and self.session.has_auth(): ---> 30 return func(self, *args, **kwargs) 31 else: 32 from .exceptions import error_for /home/jacquerie/Code/github3.py/src/github3/pulls.pyc in create_review_requests(self, reviewers, team_reviewers) 313 if team_reviewers is not None: 314 data['team_reviewers'] = team_reviewers --> 315 json = self._json(self._post(url, data=data), 201) 316 return self._instance_or_null(PullReview, json) 317 /home/jacquerie/Code/github3.py/src/github3/models.pyc in _json(self, response, expected_status_code, include_cache_info) 154 if actual_status_code != expected_status_code: 155 if actual_status_code >= 400: --> 156 raise exceptions.error_for(response) 157 158 if actual_status_code == 304: UnprocessableEntity: 422 Validation Failed ``` sigmavirus24: Yeah that exception is fine. It's what someone would see if they tried this with just th eplain API. Once you accept the invite to join the project, you should be able to record those integration tests with this very PR. You'll also need to rebase so it can be merged. 
jacquerie: Thanks again for adding me the collaborators! Interestingly, I ran into https://github.com/sigmavirus24/github3.py/issues/825 while testing my current implementation: ``` In [11]: pull_request.create_review_requests(reviewers=['sigmavirus24']) --------------------------------------------------------------------------- IncompleteResponse Traceback (most recent call last) <ipython-input-11-2125937603d7> in <module>() ----> 1 pull_request.create_review_requests(reviewers=['sigmavirus24']) /home/jacquerie/Code/github3.py/src/github3/decorators.pyc in auth_wrapper(self, *args, **kwargs) 28 def auth_wrapper(self, *args, **kwargs): 29 if hasattr(self, 'session') and self.session.has_auth(): ---> 30 return func(self, *args, **kwargs) 31 else: 32 from .exceptions import error_for /home/jacquerie/Code/github3.py/src/github3/pulls.pyc in create_review_requests(self, reviewers, team_reviewers) 314 data['team_reviewers'] = team_reviewers 315 json = self._json(self._post(url, data=data), 201) --> 316 return self._instance_or_null(PullReview, json) 317 318 @requires_auth /home/jacquerie/Code/github3.py/src/github3/models.pyc in _instance_or_null(self, instance_class, json) 144 return None 145 --> 146 return instance_class(json, self) 147 148 def _json(self, response, expected_status_code, include_cache_info=True): /home/jacquerie/Code/github3.py/src/github3/models.pyc in __init__(self, json, session) 48 self._update_attributes(json) 49 except KeyError as kerr: ---> 50 raise exceptions.IncompleteResponse(json, kerr) 51 52 def _update_attributes(self, json): IncompleteResponse: None The library was expecting more data in the response (KeyError(u'commit_id',)). Either GitHub modified it's response body, or your token is not properly scoped to retrieve this information. ``` jacquerie: @sigmavirus24 Sorry for the noise! Now it should actually be ready for review. 
> Interestingly, I ran into #825 while testing my current implementation I was wrong about this: I was mistakenly using the `PullReview` model instead of the `ShortPullRequest` one.
diff --git a/src/github3/pulls.py b/src/github3/pulls.py index 9f16b1e0..fd04c2f2 100644 --- a/src/github3/pulls.py +++ b/src/github3/pulls.py @@ -194,6 +194,7 @@ class _PullRequest(models.GitHubCore): class_name = '_PullRequest' def _update_attributes(self, pull): + from . import orgs self._api = pull['url'] self.assignee = pull['assignee'] if self.assignee is not None: @@ -217,6 +218,12 @@ class _PullRequest(models.GitHubCore): self.merged_at = self._strptime(pull['merged_at']) self.number = pull['number'] self.patch_url = pull['patch_url'] + requested_reviewers = pull.get('requested_reviewers', []) + self.requested_reviewers = [ + users.ShortUser(r, self) for r in requested_reviewers] + requested_teams = pull.get('requested_teams', []) + self.requested_teams = [ + orgs.ShortTeam(t, self) for t in requested_teams] self.review_comment_urlt = URITemplate(pull['review_comment_url']) self.review_comments_url = pull['review_comments_url'] self.repository = None @@ -286,6 +293,29 @@ class _PullRequest(models.GitHubCore): json = self._json(self._post(url, data=data), 201) return self._instance_or_null(ReviewComment, json) + @requires_auth + def create_review_requests(self, reviewers=None, team_reviewers=None): + """Ask for reviews on this pull request. 
+ + :param list reviewers: + The users to which request a review + :param list team_reviewers: + The teams to which request a review + :returns: + The pull request on which the reviews were requested + :rtype: + :class:`~github3.pulls.ShortPullRequest` + """ + url = self._build_url('requested_reviewers', base_url=self._api) + data = {} + if reviewers is not None: + data['reviewers'] = [getattr(r, 'login', r) for r in reviewers] + if team_reviewers is not None: + data['team_reviewers'] = [ + getattr(t, 'slug', t) for t in team_reviewers] + json = self._json(self._post(url, data=data), 201) + return self._instance_or_null(ShortPullRequest, json) + @requires_auth def create_review(self, body, commit_id=None, event=None, comments=None): """Create a review comment on this pull request. @@ -334,6 +364,28 @@ class _PullRequest(models.GitHubCore): json = self._json(self._post(url, data=data), 200) return self._instance_or_null(PullReview, json) + @requires_auth + def delete_review_requests(self, reviewers=None, team_reviewers=None): + """Cancel review requests on this pull request. + + :param list reviewers: + The users whose review is no longer requested + :param list team_reviewers: + The teams whose review is no longer requested + :returns: + True if successful, False otherwise + :rtype: + bool + """ + url = self._build_url('requested_reviewers', base_url=self._api) + data = {} + if reviewers is not None: + data['reviewers'] = [getattr(r, 'login', r) for r in reviewers] + if team_reviewers is not None: + data['team_reviewers'] = [ + getattr(t, 'slug', t) for t in team_reviewers] + return self._boolean(self._delete(url, data=dumps(data)), 200, 404) + def diff(self): """Return the diff. @@ -495,6 +547,18 @@ class _PullRequest(models.GitHubCore): url = self._build_url('comments', base_url=self._api) return self._iter(int(number), url, ReviewComment, etag=etag) + def review_requests(self): + """Retrieve the review requests associated with this pull request. 
+ + :returns: + review requests associated with this pull request + :rtype: + :class:`~github3.pulls.ReviewRequests` + """ + url = self._build_url('requested_reviewers', base_url=self._api) + json = self._json(self._get(url), 200) + return self._instance_or_null(ReviewRequests, json) + def reviews(self, number=-1, etag=None): """Iterate over the reviews associated with this pull request. @@ -748,6 +812,16 @@ class ShortPullRequest(_PullRequest): A :class:`~github3.repos.repo.ShortRepository` from the :attr:`base` instance. + .. attribute:: requested_reviewers + + A list of :class:`~github3.users.ShortUser` from which a review was + requested. + + .. attribute:: requested_teams + + A list of :class:`~github3.orgs.ShortTeam` from which a review was + requested. + .. attribute:: review_comment_urlt A URITemplate instance that expands to provide the review comment URL @@ -1037,3 +1111,29 @@ class ReviewComment(models.GitHubCore): 'body': body, 'in_reply_to': in_reply_to }), 201) return self._instance_or_null(ReviewComment, json) + + +class ReviewRequests(models.GitHubCore): + """Object representing review requests in the GitHub API. + + .. attribute:: teams + + The list of teams that were requested a review + + .. attribute:: users + + The list of users that were requested a review + + Please see GitHub's `Review Request Documentation`_ for more information. + + .. _Review Request Documentation: + https://developer.github.com/v3/pulls/review_requests/ + """ + def _update_attributes(self, requests): + from . import orgs + self.teams = [orgs.ShortTeam(t, self) for t in requests['teams']] + self.users = [users.ShortUser(u, self) for u in requests['users']] + + def _repr(self): + return '<Review Requests [users: {0}, teams: {1}]>'.format( + len(self.users), len(self.teams))
Assigning a reviewer As far as I can tell, it is not possible to assign a reviewer in a pull request, is it? On `develop` you can [pass multiple `assignees` to `issue.edit`](https://github.com/sigmavirus24/github3.py/blob/develop/github3/issues/issue.py#L209); are those supposed to be reviewers? Thanks!
sigmavirus24/github3.py
diff --git a/tests/cassettes/PullRequest_create_review_requests.json b/tests/cassettes/PullRequest_create_review_requests.json new file mode 100644 index 00000000..1ec3b6b4 --- /dev/null +++ b/tests/cassettes/PullRequest_create_review_requests.json @@ -0,0 +1,1 @@ +{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"], "Authorization": ["token <AUTH_TOKEN>"]}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+1bbXPjthH+Kxz1Q6ZTS3zVizU+p07ucnEnti8Xt725uqMBSUiijyIZkrIta+6/91mAlChalCXSyfWDP8gWKeyDBbBYYPcBlq157LeGrWmaRslQVVnkdSZeOp3bHSecqTGPwkRNvMmM3XnxPDEsVf5qdqKFGs19P1EHfbN11PLc1tDQLEvr9qz+USsIXT6id62Lt+8ervwfdPv9w2+fP/2kf/50qV3cnmmXb8/0y9tfSXaazvzRph4FHXbVnlXueuNxfYAOiUONiKXOtAGMkKeuSJI5L+Ec1K8CIO/YYD6zedwaopuPWknKUo5ODSMeoCY/dL5wdPKY+Qk/aqVe6tOvZ66rJPMoCuNUGYexEvM7j9/j3++ATRMIzhOCXAJg4gWQuGUOfos9no2kOdCNgbY5jL/2/vXp0nduL6yL63Pr4uzNGxRmdyxlcbmx4mViZJZElTlhkPIgFUY1VyX893dvLEBM4gxEmAspt8siCSxRi/rutp5iyXHo++E95Mv6bhp9qQp1JQbd5HcvmNSBgNhSDdMpR4ehGV+p8R4G5DAsIbLEPExSzDACgVXEMXcPg8mEoNB9AF2WYq4LtLmdOLEXpV4YHKjahiigwnjCAu+R1YCCKBmqcECHNUyIQJTfweAOlJUySzWKvTvmLKg7Yu5w7w69WwevJAy4dBHRFP0nTUD0tZfyEXNnNAnFJP561LJDd4ESP/phwhPlL/1BX7mJbwL6/DjHOAepv1Bm8BEwJwXGpHiYWphE1MlKShP8SLG5w2Biyrlyz4JUSUNMfieMXSo/U+xF7gsIguX+gcSnXqLc8XihfPioDJW/QkUn5vA57oilUMrQ9EFb67eN/rWuD019qHc/o8w8cp+WMfVr3Rp2+0PLoDIOtUfCBFg3jlozHk+evhjBQ8y8dJRMGdXnmMeGbY25bhh9s28e9/Wx0zu2XUtzjL7lHA8Ms29pOvAZOmQScHSuhM+fk9bwP/+lYRTODxVKbwgvUP4h5WyWvfSZzf3s+8zzIRgGK2Sp4TOWtefKqWZgwtDIS4v2P2+4h8AT2hP83XNqH/hc0aUqlyiaLC+h/Hr1E52TaU8rHyy6Ua/nGOr42DQHPaNH9tTv9QZ9x+w6va7GXH1g9cy+Y3QdW+9paNGUMyxLWCnJIoor5ZC5bluOGf6tVtaYj1Fq+2/Spg+o/HWZ3rE3fV2mX5fpb7hMk4skx0C7Vt3SepZp
6qUd87v7K/8fvvP++JF9+njnBF8eEPQYl28vHi+uz2j3HLAZbQbWIRXejbE4jrIfVpvXQtSFImLH9rp9r4hZX/3Cq1/4hn5BhA4UpGdB+Z7h6eYMd/lqDOEffvaOlPPvZtip+54dM+zOKaqnbX/MnJT28PdI2Sjvz9Of5/Z3ifLx3W/XytmHcxSBDEJvREIe9lEKC1wFu+4wCGeeg9Bg0aEIIYy/KNj7f1ikU/wzOv0jxexY9Kd7JETMTo8cE8q1hmk8R6ZhZ4guN48VvotQ9trEbZNHKA5p6PKFL+qDkPBSxd8sfHaQFmB2iAgqfC41UN007FULKMuNR9oZi+Bin0331nbL0AS70TBs0HtCGrrIBNU+8eyO9mbb9DxiXscADXGlOLRcBXC1e22FsFTzfAumT+BM9wsjto5EDrBU5TcxtmzSYGghDAzbD+36INhBqAJhqSLIkImldNRIL4IkgA1ERDfNlCSAFWIa8wYjIRQkhBXeITHi1sFdBYjLrBd9FkzmbNJAyxUCxphSbhP2+GwCcsesW0MAj5KqsWfPG/qtNQjpKLN/SDnXH+gCxhpR5BNrT+ViZlG0nJJE9TXM5Ddsuykm2WMZ95B8yFaL3JZkke47+6V2j2b+e1sFWbK+SfeKbH+iLv8GUmSaZYYiFu9Fi1R1BMmrS5shS9rpdJaUmSFkkUesr6sUBw6LnSkSvbU7dJkDYJMyY6nIqI9JQRfxmh8yt76OKwSgyXGrraUUL+bsBItXG09IF+FW6dL6mGuIInAQpt7Yc/YhE3Y40A2U5fdIojv8iCEfDetKPceDvWJDTcMmc8C1O0aKowGI8Qku5j6H6dbvlRxgqUrWx+WRHy72zhVvnVUFDPKq21L9unGt6UNtMOxalan+dZljKhPNk2mZMSA2wBha2tASReAss57FN/DOBb53m6LEJEIqSXJ+Fs9/X8sMd8iAdwjKk3rP2u7K69UzctBwGs54hA0DorY8PSGFzA461wX94oZO0vFCao33iHJdMBu9jb2BE84DkC1IJt0To0wr8fpVvp9ABTJgo0pZMpLzeh114lUUh7fcQe4/C9qo2NqTFF7ee1+8TUna8qzeyJBtrcLMi+Mw43wzrkW6zjUTTfx0plJRd8/hQYI2LymAQwsEF4oGZOmuq+wxidwHcXhAootgU34tHitwetfv/dvP/+4+fqZsGpgzGR1SxxXq3+hI8eDyMZv76Uhu4KGGi5jDD6PWV2LfMEeL+f7i6YPhjIFBIoVlln/1KBP7vYFhcdvhmtY9Hju837UZHga2bTlj2+xzwzSOmeH0IV9O7Bdrwc/iMIWlDcwKCv5cu3g7edyDgjcrKXgJ34CCL6m8O81RKnwQEb9x/qM2F78FpQkdXzqU0oCR30B6OVJ+E7ZI6cO+DublN9AOpeY3hMXSDBX2yRLITO6G+MsQ9CWNNgh+6PY8R19M/pt9XevreunMUzn1/3j5+O4en4e1u8sXhmiBd8W0f8m46DQWnbeigStn/otFXx1Hfmxti+G8Oo49T/O8Oo6tJyG3mFSdwz2HsQPVjuD/lyDIaI89GILq1u1NElRAHMwTVOE0pgqqgF+KLajCP5gwqAKqyxlU4b0AbVAFXY85qEJrSB5UwTbhD6owKU2PxX9vCqEKBxuSBizCLtTaRMIu0Hpcwi7E+nRCFWozRqEKtT6pUIXYiFeoAi3yE7DPA6mFKtRm7MIzqIKlgK4HkAFViGUuALMrO9y6Vw6wCpYmaRn6EJqhCncbEXAw01AF/kJkQxV8TljU4xuqUWeNKYcq7DqsQxXWyxAPVej1uIcqtAb0QxVkQwaiCvaFSIgq+D+Ch6iqqwkVUYXZkI2ogt1NSBhtzWzr5rV+POyCkDArCQlTu9a7Q6ubkRY7CYmu9gwhUaHrc5zEbrFnaIndwskuZqJCFItaLXLCHGwjJwa69ZSeEC+fIyjkATLiIv4QfsLUDFDCtRiKLmn/51AUQssNkoIqz+keXPajnnzC
U2R8A9EUI98LcJYLTErCfdw4WLamkpOoOBQrGdEK0yhc5AQ0ZfK3ABa4r10w4kYmUISj3wJT9yIkIPMdysuhri+YAL50/6ZpLavLsTsqeZk6nu7css7CCZmXqQFjurqoBOw8qGkKvgqO9r4SQ6bP5jinGo8Qn4eOJ04DgML78eqXX85+uPp4dn31kUJh3KEbSUNunUSnJ0nEAsXxIfPmRppmG6kdnHzFjbgw9FMvinjxazvhNy2FxR5ri8UTUtd0NY4GNb87BzxxQ09Yurin17lpncpreycq1XiqnOD47UatNG2V26QthNr0hHpweY61OZGq7ZQ/pKjsJ4ZLZ6SQQgdHsirE9d68uOeimKlpet8wB0b+NuKxuBkYBjnSudBOiAJGyfKPeXlk6QCzyVSLO7pVUzzbyKOxgCCXU1/8lLrsRGWnJ2p0ehNgmP70641Uc24t1PNkSOtrl9/mvqW8Frki38Ujs+let1w5Y04UdfHNqsiIJhQdPUAmSFwMX12ytMG2SwZ97URxnfyJywPfPGM4WI4PLik7LBjNQtyOh7SsPAs8ceMF89B1PXGxF0+0erk44JI9A8aZ4ugkDoGM6fZka9j7+j+ZEJBcakAAAA==", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Last-Modified": ["Tue, 31 Jul 2018 14:57:42 GMT"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["9C0A:3FAD:710C4A:EF0B0C:5B6078F0"], "ETag": ["W/\"e16f3cd04ae275c3bf00290d716e8e65\""], "Date": ["Tue, 31 Jul 2018 14:57:53 GMT"], "X-RateLimit-Remaining": ["4995"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-OAuth-Scopes": ["public_repo, read:user"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.198781"], "Vary": ["Accept, Authorization, Cookie, X-GitHub-OTP"], "X-RateLimit-Limit": ["5000"], "Cache-Control": ["private, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Accepted-OAuth-Scopes": [""], "X-RateLimit-Reset": ["1533052364"]}, "status": {"message": "OK", 
"code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873"}, "recorded_at": "2018-07-31T14:57:53"}, {"request": {"body": {"string": "{\"reviewers\": [\"sigmavirus24\"]}", "encoding": "utf-8"}, "headers": {"Content-Length": ["31"], "Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"], "Authorization": ["token <AUTH_TOKEN>"]}, "method": "POST", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/requested_reviewers"}, "response": {"body": {"string": "{\"url\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls/873\",\"id\":204405647,\"node_id\":\"MDExOlB1bGxSZXF1ZXN0MjA0NDA1NjQ3\",\"html_url\":\"https://github.com/sigmavirus24/github3.py/pull/873\",\"diff_url\":\"https://github.com/sigmavirus24/github3.py/pull/873.diff\",\"patch_url\":\"https://github.com/sigmavirus24/github3.py/pull/873.patch\",\"issue_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/issues/873\",\"number\":873,\"state\":\"open\",\"locked\":false,\"title\":\"Add support for review 
requests\",\"user\":{\"login\":\"jacquerie\",\"id\":381280,\"node_id\":\"MDQ6VXNlcjM4MTI4MA==\",\"avatar_url\":\"https://avatars2.githubusercontent.com/u/381280?v=4\",\"gravatar_id\":\"\",\"url\":\"https://api.github.com/users/jacquerie\",\"html_url\":\"https://github.com/jacquerie\",\"followers_url\":\"https://api.github.com/users/jacquerie/followers\",\"following_url\":\"https://api.github.com/users/jacquerie/following{/other_user}\",\"gists_url\":\"https://api.github.com/users/jacquerie/gists{/gist_id}\",\"starred_url\":\"https://api.github.com/users/jacquerie/starred{/owner}{/repo}\",\"subscriptions_url\":\"https://api.github.com/users/jacquerie/subscriptions\",\"organizations_url\":\"https://api.github.com/users/jacquerie/orgs\",\"repos_url\":\"https://api.github.com/users/jacquerie/repos\",\"events_url\":\"https://api.github.com/users/jacquerie/events{/privacy}\",\"received_events_url\":\"https://api.github.com/users/jacquerie/received_events\",\"type\":\"User\",\"site_admin\":false},\"body\":\"Closes #787 \\r\\n\\r\\nCurrently missing the integration tests, because I want to record them by requesting a review on this very PR : 
)\",\"created_at\":\"2018-07-27T11:31:15Z\",\"updated_at\":\"2018-07-31T14:57:53Z\",\"closed_at\":null,\"merged_at\":null,\"merge_commit_sha\":\"2c392b4fe1227373971fc69bd40c274c98237401\",\"assignee\":null,\"assignees\":[],\"requested_reviewers\":[{\"login\":\"sigmavirus24\",\"id\":240830,\"node_id\":\"MDQ6VXNlcjI0MDgzMA==\",\"avatar_url\":\"https://avatars3.githubusercontent.com/u/240830?v=4\",\"gravatar_id\":\"\",\"url\":\"https://api.github.com/users/sigmavirus24\",\"html_url\":\"https://github.com/sigmavirus24\",\"followers_url\":\"https://api.github.com/users/sigmavirus24/followers\",\"following_url\":\"https://api.github.com/users/sigmavirus24/following{/other_user}\",\"gists_url\":\"https://api.github.com/users/sigmavirus24/gists{/gist_id}\",\"starred_url\":\"https://api.github.com/users/sigmavirus24/starred{/owner}{/repo}\",\"subscriptions_url\":\"https://api.github.com/users/sigmavirus24/subscriptions\",\"organizations_url\":\"https://api.github.com/users/sigmavirus24/orgs\",\"repos_url\":\"https://api.github.com/users/sigmavirus24/repos\",\"events_url\":\"https://api.github.com/users/sigmavirus24/events{/privacy}\",\"received_events_url\":\"https://api.github.com/users/sigmavirus24/received_events\",\"type\":\"User\",\"site_admin\":false}],\"requested_teams\":[],\"labels\":[],\"milestone\":null,\"commits_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/commits\",\"review_comments_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/comments\",\"review_comment_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls/comments{/number}\",\"comments_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/issues/873/comments\",\"statuses_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/statuses/f9338626397176687c35c650ad184637c25cb160\",\"head\":{\"label\":\"jacquerie:add-review-requests\",\"ref\":\"add-review-requests\",\"sha\":\"f9338626397176687c35c650ad184637c25cb160\",\"user\":{\"login\":\"
jacquerie\",\"id\":381280,\"node_id\":\"MDQ6VXNlcjM4MTI4MA==\",\"avatar_url\":\"https://avatars2.githubusercontent.com/u/381280?v=4\",\"gravatar_id\":\"\",\"url\":\"https://api.github.com/users/jacquerie\",\"html_url\":\"https://github.com/jacquerie\",\"followers_url\":\"https://api.github.com/users/jacquerie/followers\",\"following_url\":\"https://api.github.com/users/jacquerie/following{/other_user}\",\"gists_url\":\"https://api.github.com/users/jacquerie/gists{/gist_id}\",\"starred_url\":\"https://api.github.com/users/jacquerie/starred{/owner}{/repo}\",\"subscriptions_url\":\"https://api.github.com/users/jacquerie/subscriptions\",\"organizations_url\":\"https://api.github.com/users/jacquerie/orgs\",\"repos_url\":\"https://api.github.com/users/jacquerie/repos\",\"events_url\":\"https://api.github.com/users/jacquerie/events{/privacy}\",\"received_events_url\":\"https://api.github.com/users/jacquerie/received_events\",\"type\":\"User\",\"site_admin\":false},\"repo\":{\"id\":140643310,\"node_id\":\"MDEwOlJlcG9zaXRvcnkxNDA2NDMzMTA=\",\"name\":\"github3.py\",\"full_name\":\"jacquerie/github3.py\",\"owner\":{\"login\":\"jacquerie\",\"id\":381280,\"node_id\":\"MDQ6VXNlcjM4MTI4MA==\",\"avatar_url\":\"https://avatars2.githubusercontent.com/u/381280?v=4\",\"gravatar_id\":\"\",\"url\":\"https://api.github.com/users/jacquerie\",\"html_url\":\"https://github.com/jacquerie\",\"followers_url\":\"https://api.github.com/users/jacquerie/followers\",\"following_url\":\"https://api.github.com/users/jacquerie/following{/other_user}\",\"gists_url\":\"https://api.github.com/users/jacquerie/gists{/gist_id}\",\"starred_url\":\"https://api.github.com/users/jacquerie/starred{/owner}{/repo}\",\"subscriptions_url\":\"https://api.github.com/users/jacquerie/subscriptions\",\"organizations_url\":\"https://api.github.com/users/jacquerie/orgs\",\"repos_url\":\"https://api.github.com/users/jacquerie/repos\",\"events_url\":\"https://api.github.com/users/jacquerie/events{/privacy}\",\"received_events
_url\":\"https://api.github.com/users/jacquerie/received_events\",\"type\":\"User\",\"site_admin\":false},\"private\":false,\"html_url\":\"https://github.com/jacquerie/github3.py\",\"description\":\"Hi, I'm a library for interacting with GItHub's REST API in a convenient and ergonomic way. I work on Python 2.7, 3.4, 3.5, and 3.6\",\"fork\":true,\"url\":\"https://api.github.com/repos/jacquerie/github3.py\",\"forks_url\":\"https://api.github.com/repos/jacquerie/github3.py/forks\",\"keys_url\":\"https://api.github.com/repos/jacquerie/github3.py/keys{/key_id}\",\"collaborators_url\":\"https://api.github.com/repos/jacquerie/github3.py/collaborators{/collaborator}\",\"teams_url\":\"https://api.github.com/repos/jacquerie/github3.py/teams\",\"hooks_url\":\"https://api.github.com/repos/jacquerie/github3.py/hooks\",\"issue_events_url\":\"https://api.github.com/repos/jacquerie/github3.py/issues/events{/number}\",\"events_url\":\"https://api.github.com/repos/jacquerie/github3.py/events\",\"assignees_url\":\"https://api.github.com/repos/jacquerie/github3.py/assignees{/user}\",\"branches_url\":\"https://api.github.com/repos/jacquerie/github3.py/branches{/branch}\",\"tags_url\":\"https://api.github.com/repos/jacquerie/github3.py/tags\",\"blobs_url\":\"https://api.github.com/repos/jacquerie/github3.py/git/blobs{/sha}\",\"git_tags_url\":\"https://api.github.com/repos/jacquerie/github3.py/git/tags{/sha}\",\"git_refs_url\":\"https://api.github.com/repos/jacquerie/github3.py/git/refs{/sha}\",\"trees_url\":\"https://api.github.com/repos/jacquerie/github3.py/git/trees{/sha}\",\"statuses_url\":\"https://api.github.com/repos/jacquerie/github3.py/statuses/{sha}\",\"languages_url\":\"https://api.github.com/repos/jacquerie/github3.py/languages\",\"stargazers_url\":\"https://api.github.com/repos/jacquerie/github3.py/stargazers\",\"contributors_url\":\"https://api.github.com/repos/jacquerie/github3.py/contributors\",\"subscribers_url\":\"https://api.github.com/repos/jacquerie/github3.py/subscri
bers\",\"subscription_url\":\"https://api.github.com/repos/jacquerie/github3.py/subscription\",\"commits_url\":\"https://api.github.com/repos/jacquerie/github3.py/commits{/sha}\",\"git_commits_url\":\"https://api.github.com/repos/jacquerie/github3.py/git/commits{/sha}\",\"comments_url\":\"https://api.github.com/repos/jacquerie/github3.py/comments{/number}\",\"issue_comment_url\":\"https://api.github.com/repos/jacquerie/github3.py/issues/comments{/number}\",\"contents_url\":\"https://api.github.com/repos/jacquerie/github3.py/contents/{+path}\",\"compare_url\":\"https://api.github.com/repos/jacquerie/github3.py/compare/{base}...{head}\",\"merges_url\":\"https://api.github.com/repos/jacquerie/github3.py/merges\",\"archive_url\":\"https://api.github.com/repos/jacquerie/github3.py/{archive_format}{/ref}\",\"downloads_url\":\"https://api.github.com/repos/jacquerie/github3.py/downloads\",\"issues_url\":\"https://api.github.com/repos/jacquerie/github3.py/issues{/number}\",\"pulls_url\":\"https://api.github.com/repos/jacquerie/github3.py/pulls{/number}\",\"milestones_url\":\"https://api.github.com/repos/jacquerie/github3.py/milestones{/number}\",\"notifications_url\":\"https://api.github.com/repos/jacquerie/github3.py/notifications{?since,all,participating}\",\"labels_url\":\"https://api.github.com/repos/jacquerie/github3.py/labels{/name}\",\"releases_url\":\"https://api.github.com/repos/jacquerie/github3.py/releases{/id}\",\"deployments_url\":\"https://api.github.com/repos/jacquerie/github3.py/deployments\",\"created_at\":\"2018-07-12T01:08:54Z\",\"updated_at\":\"2018-07-12T01:08:59Z\",\"pushed_at\":\"2018-07-31T12:40:49Z\",\"git_url\":\"git://github.com/jacquerie/github3.py.git\",\"ssh_url\":\"[email 
protected]:jacquerie/github3.py.git\",\"clone_url\":\"https://github.com/jacquerie/github3.py.git\",\"svn_url\":\"https://github.com/jacquerie/github3.py\",\"homepage\":\"https://github3.readthedocs.io\",\"size\":52276,\"stargazers_count\":0,\"watchers_count\":0,\"language\":\"Python\",\"has_issues\":false,\"has_projects\":true,\"has_downloads\":true,\"has_wiki\":false,\"has_pages\":false,\"forks_count\":0,\"mirror_url\":null,\"archived\":false,\"open_issues_count\":0,\"license\":{\"key\":\"other\",\"name\":\"Other\",\"spdx_id\":null,\"url\":null,\"node_id\":\"MDc6TGljZW5zZTA=\"},\"forks\":0,\"open_issues\":0,\"watchers\":0,\"default_branch\":\"develop\"}},\"base\":{\"label\":\"sigmavirus24:master\",\"ref\":\"master\",\"sha\":\"6824ebce0059fce75bace08bb4cfb37e2329a2c7\",\"user\":{\"login\":\"sigmavirus24\",\"id\":240830,\"node_id\":\"MDQ6VXNlcjI0MDgzMA==\",\"avatar_url\":\"https://avatars3.githubusercontent.com/u/240830?v=4\",\"gravatar_id\":\"\",\"url\":\"https://api.github.com/users/sigmavirus24\",\"html_url\":\"https://github.com/sigmavirus24\",\"followers_url\":\"https://api.github.com/users/sigmavirus24/followers\",\"following_url\":\"https://api.github.com/users/sigmavirus24/following{/other_user}\",\"gists_url\":\"https://api.github.com/users/sigmavirus24/gists{/gist_id}\",\"starred_url\":\"https://api.github.com/users/sigmavirus24/starred{/owner}{/repo}\",\"subscriptions_url\":\"https://api.github.com/users/sigmavirus24/subscriptions\",\"organizations_url\":\"https://api.github.com/users/sigmavirus24/orgs\",\"repos_url\":\"https://api.github.com/users/sigmavirus24/repos\",\"events_url\":\"https://api.github.com/users/sigmavirus24/events{/privacy}\",\"received_events_url\":\"https://api.github.com/users/sigmavirus24/received_events\",\"type\":\"User\",\"site_admin\":false},\"repo\":{\"id\":3710711,\"node_id\":\"MDEwOlJlcG9zaXRvcnkzNzEwNzEx\",\"name\":\"github3.py\",\"full_name\":\"sigmavirus24/github3.py\",\"owner\":{\"login\":\"sigmavirus24\",\"id\":240830,\
"node_id\":\"MDQ6VXNlcjI0MDgzMA==\",\"avatar_url\":\"https://avatars3.githubusercontent.com/u/240830?v=4\",\"gravatar_id\":\"\",\"url\":\"https://api.github.com/users/sigmavirus24\",\"html_url\":\"https://github.com/sigmavirus24\",\"followers_url\":\"https://api.github.com/users/sigmavirus24/followers\",\"following_url\":\"https://api.github.com/users/sigmavirus24/following{/other_user}\",\"gists_url\":\"https://api.github.com/users/sigmavirus24/gists{/gist_id}\",\"starred_url\":\"https://api.github.com/users/sigmavirus24/starred{/owner}{/repo}\",\"subscriptions_url\":\"https://api.github.com/users/sigmavirus24/subscriptions\",\"organizations_url\":\"https://api.github.com/users/sigmavirus24/orgs\",\"repos_url\":\"https://api.github.com/users/sigmavirus24/repos\",\"events_url\":\"https://api.github.com/users/sigmavirus24/events{/privacy}\",\"received_events_url\":\"https://api.github.com/users/sigmavirus24/received_events\",\"type\":\"User\",\"site_admin\":false},\"private\":false,\"html_url\":\"https://github.com/sigmavirus24/github3.py\",\"description\":\"Hi, I'm a library for interacting with GItHub's REST API in a convenient and ergonomic way. 
I work on Python 2.7, 3.4, 3.5, and 3.6\",\"fork\":false,\"url\":\"https://api.github.com/repos/sigmavirus24/github3.py\",\"forks_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/forks\",\"keys_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/keys{/key_id}\",\"collaborators_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/collaborators{/collaborator}\",\"teams_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/teams\",\"hooks_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/hooks\",\"issue_events_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/issues/events{/number}\",\"events_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/events\",\"assignees_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/assignees{/user}\",\"branches_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/branches{/branch}\",\"tags_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/tags\",\"blobs_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/git/blobs{/sha}\",\"git_tags_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/git/tags{/sha}\",\"git_refs_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/git/refs{/sha}\",\"trees_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/git/trees{/sha}\",\"statuses_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/statuses/{sha}\",\"languages_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/languages\",\"stargazers_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/stargazers\",\"contributors_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/contributors\",\"subscribers_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/subscribers\",\"subscription_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/subscription\",\"commits_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/commits{/sha}\",\"git_commits_url\":\"https://api.github.com/
repos/sigmavirus24/github3.py/git/commits{/sha}\",\"comments_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/comments{/number}\",\"issue_comment_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/issues/comments{/number}\",\"contents_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/contents/{+path}\",\"compare_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/compare/{base}...{head}\",\"merges_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/merges\",\"archive_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/{archive_format}{/ref}\",\"downloads_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/downloads\",\"issues_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/issues{/number}\",\"pulls_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls{/number}\",\"milestones_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/milestones{/number}\",\"notifications_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/notifications{?since,all,participating}\",\"labels_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/labels{/name}\",\"releases_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/releases{/id}\",\"deployments_url\":\"https://api.github.com/repos/sigmavirus24/github3.py/deployments\",\"created_at\":\"2012-03-13T19:58:53Z\",\"updated_at\":\"2018-07-30T15:45:54Z\",\"pushed_at\":\"2018-07-31T12:40:50Z\",\"git_url\":\"git://github.com/sigmavirus24/github3.py.git\",\"ssh_url\":\"[email 
protected]:sigmavirus24/github3.py.git\",\"clone_url\":\"https://github.com/sigmavirus24/github3.py.git\",\"svn_url\":\"https://github.com/sigmavirus24/github3.py\",\"homepage\":\"https://github3.readthedocs.io\",\"size\":52238,\"stargazers_count\":814,\"watchers_count\":814,\"language\":\"Python\",\"has_issues\":true,\"has_projects\":true,\"has_downloads\":true,\"has_wiki\":false,\"has_pages\":false,\"forks_count\":302,\"mirror_url\":null,\"archived\":false,\"open_issues_count\":54,\"license\":{\"key\":\"other\",\"name\":\"Other\",\"spdx_id\":null,\"url\":null,\"node_id\":\"MDc6TGljZW5zZTA=\"},\"forks\":302,\"open_issues\":54,\"watchers\":814,\"default_branch\":\"master\"}},\"_links\":{\"self\":{\"href\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls/873\"},\"html\":{\"href\":\"https://github.com/sigmavirus24/github3.py/pull/873\"},\"issue\":{\"href\":\"https://api.github.com/repos/sigmavirus24/github3.py/issues/873\"},\"comments\":{\"href\":\"https://api.github.com/repos/sigmavirus24/github3.py/issues/873/comments\"},\"review_comments\":{\"href\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/comments\"},\"review_comment\":{\"href\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls/comments{/number}\"},\"commits\":{\"href\":\"https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/commits\"},\"statuses\":{\"href\":\"https://api.github.com/repos/sigmavirus24/github3.py/statuses/f9338626397176687c35c650ad184637c25cb160\"}},\"author_association\":\"COLLABORATOR\",\"body_html\":\"<p><span class=\\\"issue-keyword tooltipped tooltipped-se\\\" aria-label=\\\"This pull request closes issue #787.\\\">Closes</span> <a class=\\\"issue-link js-issue-link\\\" data-error-text=\\\"Failed to load issue title\\\" data-id=\\\"300172382\\\" data-permission-text=\\\"Issue title is private\\\" data-url=\\\"https://github.com/sigmavirus24/github3.py/issues/787\\\" 
href=\\\"https://github.com/sigmavirus24/github3.py/issues/787\\\">#787</a></p>\\n<p>Currently missing the integration tests, because I want to record them by requesting a review on this very PR : )</p>\",\"body_text\":\"Closes #787\\nCurrently missing the integration tests, because I want to record them by requesting a review on this very PR : )\"}", "encoding": "utf-8"}, "headers": {"Content-Length": ["17225"], "X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Location": ["https://api.github.com/repos/sigmavirus24/github3.py/pulls/873"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["201 Created"], "X-GitHub-Request-Id": ["9C0A:3FAD:710C64:EF0B3F:5B6078F1"], "ETag": ["\"f1798b9beac5f04fdd4dda4a3c175f45\""], "Date": ["Tue, 31 Jul 2018 14:57:53 GMT"], "X-RateLimit-Remaining": ["4994"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-OAuth-Scopes": ["public_repo, read:user"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "X-Runtime-rack": ["0.506507"], "Vary": ["Accept, Authorization, Cookie, X-GitHub-OTP"], "X-RateLimit-Limit": ["5000"], "Cache-Control": ["private, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Accepted-OAuth-Scopes": [""], "X-RateLimit-Reset": ["1533052364"]}, "status": {"message": "Created", "code": 201}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/requested_reviewers"}, "recorded_at": "2018-07-31T14:57:53"}], "recorded_with": "betamax/0.8.1"} \ No newline at end of file diff --git 
a/tests/cassettes/PullRequest_delete_review_requests.json b/tests/cassettes/PullRequest_delete_review_requests.json new file mode 100644 index 00000000..2b85d0b5 --- /dev/null +++ b/tests/cassettes/PullRequest_delete_review_requests.json @@ -0,0 +1,1 @@ +{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"], "Authorization": ["token <AUTH_TOKEN>"]}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+1cbXPiRhL+KyruQ+rqDHoDgymvc052s/FVbG82vrutPV9RgzSAvEJSJGEbU/vf7+kZCSTMYJCc7H3gg20kpp/peeuZ7qfHi8Ys9hv9xiRNo6Sv6yzyWmMvncyGLSec6jGPwkRPvPGU3XvxLLHauvzWbkVzPZr5fqL3unbjqOG5jb5ltNtG57jdPWoEocsH9K5x+fbd47X/gzl8//jb508/mZ8/XRmXd+fG1dtz8+ruV5KdpFN/UNajoMO22rPKXW80qg7QInGoEbHUmdSAEfLUFUky42s4e/WrAMg7NphNhzxu9NHNR40kZSlHp4YRD1CTHzpfODp5xPyEHzVSL/Xp23PX1ZJZFIVxqo3CWIv5vccf8Od3wKYJBGcJQS4AMPYCSNwxB9/FHs9G0u6ZVs8oD+Ovx//6dOU7d5fty5uL9uX5mzcozO5ZyuL1xoqXiZXNJKrMCYOUB6mYVDNdwn9//6YNiHGcgYjpQsptm5EEluhFfbfPnmLJUej74QPk1/UtT/q1KvSlGHSTn71gXAUCYgs9TCccHYZmfKXGexiQ/bCEyALrMEmxwggEsyKOubsfTCYEhR4C6LIQa12gzYaJE3tR6oXBnqqVRAEVxmMWeE+sAhREaaIKA7Rfw4QIRPk9JtyeslJmoUexd8+cOXVHzB3u3aN3q+CtCQMunUe0RP9JCxB97aV8wNwpLUKxiL8eNYahO0eJH/0w4Yn2l26vq93GtwH9/DjDOAepP9emsBGYThomk+ZhaWERUSdrKS3wI23IHYYppl1oDyxItTTE4nfC2KXyU204z20BQbDcPpD4xEu0ex7PtQ8ftb72V6joxBw2xx2wFEpZhtlrGt2m1b0xzb5t9s3OZ5SZRe7zMrZ5Y1p9u9u3TqiMQ+2RMAH2jaPGlMfj5y8GsBBTLx0kE4b6eidD69hq905szo9Hxmg4ch3DNbrM7RqO6Y64yU3HtGkTYeiQccDRuRI+f04a/f/8l4ZRGD9UKK0hrAC+WFnA4i4DNLGdtY2erTCCF8bl2/HTDkbQVhpBCV/DCK6pvN0OrhXeyxSWduDK1nADSh2DuHYsqGETS0ivZxbLsEWjivm1t2Usoe1rHEvC+9vHkvjrmMg1jUomdgcrWVrQKWfTbJX7bMj97PPU87Hgw2BpEaRleWFH2PHEq2dg0FXaE2G3Xt5w9oEntGf42/fCXeBzRRe6PFrSJvcayq9OraJzMu3pxIqdqFa
v5xg6TGbH7BrHo3Z31Dat9tCxHGto9VyXdbA/OHbbsI/dbgctmnAGEw77TjOieMLtM9dtyjHDn+WJOOYjlNr8ndyL9qj8cLze4lMejteH4/U3PF6TiSTDQCc8s20ct23bXDvkvXu49v/hO+9Pntinj/dO8OURwQrr6u3l0+XNOXm9AZvSIX4VCsG7EQ61g+yLpdNZiJbQpk+e1sHtVsSaDnbhYBe+oV0QLj8F17Jg2nZ3SrHCXb4cQ9iHn70j7eK7KTxs3xvGDF41RePIXY+Zk5Lv/YBQq/b+Iv15Nvwu0T6+++1GO/9wgSKQQcgMEQwP5yiNBa4GbzkMwqnnwKWft8izD+MvGnz2D/N0gj9Wq3uk2a02/eocCRG7dUyGCeUa/TSeIUK4NbQmD4+KlhHKToe4TfIIoUEaunzh8+ogJLzQ8TsLezkI57FhiMhH+FJIT900nFULKIvSI52MhXOxy6F7Y7ula4LTaBjW6D0hDV1kYHmXONSW9mbH9NyNW/kANXGlOLRcBl4q99oSYaHncVIsn8CZ7OZGbByJHGChy09ibNm4xtBCGBhDPxxWB8EJQhcICx1OhgwIp4NaehEkAZQQ4d3UU5IAlohpzGuMhFCQEJZ4+/iIGwd36SAusl70WTCesXENLZcIGGOKCY3Z04vEwZZVt4IAHpEhsTec1bRbKxDSUQaYQBVVH+gCxgpR8ACVl3KRERAtp+BudQ0z+dLcrotJ83Edd594yMYZuSnIIs139k3lHs3s96YKMpKtTvcKli7RF38DmTnJIkMRi3eiM1UdQfL6YsjAbrRarQVFZghZxP+r6yrFgcNiZwKCpnKHLnIAHFKmLBVM2IgUdOGv+SFzq+u4RACaHLfKWkrxYsxOsO+V8YR0EW4ZLq2OuYIoAgdh6o08ZxcScIsBLaEsvgf55fAjBh4Jsyv1HA/zFQdqGjYZA67cMVIcDYCPT3Ax9zmmbvVeyQEWumRrXR754XznWPHGVVXAIKu6iaIzrRvD7Bu9fqetpOhWZQRFF82SyTrTZxs3Zq9vG0juoCIwllnP4hPyRQp5GpsUJfILUkmS51Xg+e8rmf4WGfCFwfqi3rG2+/X96gU5aDgJpzzCgQFeWx6ekEJ2C53rgjZ1QydpeSG1xntCuY5l2Sels4ETzgKQpAgmPVAmCO3Eq1f5eQIVSIeNKmXJQK7rldeJV1Ec3nEHsf/MaaNiK0tSePngffHKknTkWb6RLttKhakXx2GWq5FxpNJ0rjJIKK8kU6mou+fwIEGbF+TAoQUihwENyMJd19ljErmPIulHogtnU34spgM5xzfv/bvP/+48faZoGhhv6R1SxxXqL3WkeHD5iM38dCAP8FDDhc/hh1HjK7HmWKPFeH+RYOpPGZhfUlhG+ZePMrB/wsyew81O1wK/zLptd9QdgUToccu2e4bVOT4x2/aJ2YP8emC/WAu+PrDGMsFH5OiUKL4Da7xjMs2BNd6YiCiDwzVZY5lOlAf/7a5pdE1zLVdxPfT/dPX07gE/jytzl28M0RzvimH/knqlUs8i/wfDsSnddMMoHwzHwXBQumO9jLzahmM/dkBtCP5/CYKM9tiBIVC3bmeSQAGxN0+gwqlNFaiAX4stUOHvTRiogKpyBiq8V6ANVNDVmAMVWk3yQAVbhz9QYVKYHkeInSkEFQ6OGjVYhG2olYmEbaDVuIRtiNXpBBVqPUZBhVqdVFAh1uIVVKBFfgLzc09qQYVaj114AVWwFNB1DzJAhbjOBWB1ZUnpO8UAVbC0SNeh96EZVLibiIC9mQYV+CuRDSr4nLCoxjeoUae1KQcVdhXWQYX1OsSDCr0a96BCq0E/qCBrMhAq2FciIVTwfwQPoaqrDhWhwqzJRqhgtxMSVtOwm6Z9Y570OyAkbCUhQWxDp9/uZKSFipCQRcwXCAmFri9xEtvFXqAltgsn25gJhSg2tWrkRG8TOdEz28/pCfHyJYJCJpARF/GH8BO2YYESrsRQdEj7P4e
iEFqWSAqqPKd7cEmXevIZT5HxDURTDHwvQC4XmJSE+7hxsGhMJCehSIrd5UIHXcAGNCUsbgAscF+KGSbucYub1EARhn4DTNULzIDMTyivh7q6YAL4tfs3dWtZXmrfUsnr1PH85JZ1FjJkXqcGjOnyohKwc6emLvj+93Fo6rMZ8lTjAfzz0PFENgAovB+vf/nl/Ifrj+c31x/JFcbd14GcyI3T6Ow0iVigOT5k3tzKqdlEaAeZr7jJGoZ+6kURL35sJvy2obHYY02xeULqhq600qDmd16BJ27Wipku7te2bhtn8rrtqU41nmmnSL8t1UrLVrtLmkKoSU+oB5deWZMTqdpM+WOKyn5iuHRGCmmUOJJVIa7l58U9F8VswzC7lt2z8rcRj8WN3jDIkS6EdkIUMFoWf8zLI0oHmDJTLag31RLPDvJoLCDI5FQXP6MuO9XZ2akend0GGKY//Voy1ZzPFup5mkir69Lf5p60vM68JN/FIxvS/2OQO2fMiaIuvlkWGdCCotSDIZ94gYuWZXejhyDbJYG+sqH4LxDPLB7I8SlDXjl+8L8FHBYMpiH+qQWkZd2Z34kLL1iGruuJ+/h4srGbuchvyZ4B40yQOYkckBFdnsQV6K//A9hidhshRAAA", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Last-Modified": ["Tue, 31 Jul 2018 12:37:29 GMT"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["9746:3FAC:7DC188:102C2C9:5B605840"], "ETag": ["W/\"cc4210ad9f1cd12d4300e31b40a12398\""], "Date": ["Tue, 31 Jul 2018 12:38:25 GMT"], "X-RateLimit-Remaining": ["4995"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-OAuth-Scopes": ["public_repo, read:user"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.225128"], "Vary": ["Accept, Authorization, Cookie, X-GitHub-OTP"], "X-RateLimit-Limit": ["5000"], "Cache-Control": ["private, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Accepted-OAuth-Scopes": [""], "X-RateLimit-Reset": ["1533044057"]}, 
"status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873"}, "recorded_at": "2018-07-31T12:38:25"}, {"request": {"body": {"string": "{\"reviewers\": [\"sigmavirus24\"]}", "encoding": "utf-8"}, "headers": {"Content-Length": ["31"], "Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"], "Authorization": ["token <AUTH_TOKEN>"]}, "method": "DELETE", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/requested_reviewers"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+1bbXPiOBL+Ky7uw9bVBfwGgVCZ7GV3ZmdztUlmZ3N3U3O5ooQtwBlje22ThFDz3+9pyQbjIAI2u3Mf8iEBbPWjltSSuvuRFo1Z7Df6jUmaRklf11nktcZeOpkNW0441WMehYmeeOMpu/fiWWK1dfnWbkVzPZr5fqL3unbjqOG5jb5ltNtG57jdPWoEocsH9Kxx+fbd47X/gzl8//jb508/mZ8/XRmXd+fG1dtz8+ruV5KdpFN/sK5HQYdttWeVu95oVB2gReJQI2KpM6kBI+SpK5Jkxks4e/WrAMg7NphNhzxu9NHNR40kZSlHp4YRD1CTHzpfODp5xPyEHzVSL/Xp7bnrasksisI41UZhrMX83uMP+PgdsGkCwVlCkAsAjL0AEnfMwbvY49lI2j3T6hnrw/jr8b8+XfnO3WX78uaifXn+5g0Ks3uWsrjcWPEwsTJLosqcMEh5kAqjmukS/vv7N21AjOMMRJgLKbfNIgks0Yv6breeYslR6PvhA+TL+q4bfakKfSkG3eR3LxhXgYDYQg/TCUeHoRlfqfEeBmQ/LCGywDxMUswwAoFVxDF394PJhKDQQwBdFmKuC7TZMHFiL0q9MNhTtTVRQIXxmAXeE6sABVEyVLEA7dcwIQJRfg+D21NWyiz0KPbumTOn7oi5w7179G4VvJIw4NJ5RFP0nzQB0ddeygfMndIkFJP461FjGLpzlPjRDxOeaH/p9rrabXwb0N+PM4xzkPpzbYo1AuakwZg0D1MLk4g6WUtpgh9pQ+4wmJh2oT2wINXSEJPfCWOXyk+14TxfCwiC5esDiU+8RLvn8Vz78FHra3+Fik7Msea4A5ZCKcswe02j27S6N6bZt82+2fmMMrPIfV7GNm9Mq2/3+pYo41B7JEyAfeOoMeXx+PmDAVaIqZcOkglDfb2ToXVstXsnNufHI2M0HLmO4Rpd5nYNx3RH3OSmY9q0iTB0yDjg6FwJn/9OGv3//JeGUSx+qFCuhlgFyi9SzqbZQ58NuZ99n3o+BMNgiSw1fMGydtw59QxMGBqt0qL9LxvuPvCE9gx/+5zaBT5XdKHLLYomyyGUX+1+onMy7Wnng0XX6vUcQ7fgpphd43jU7o7aptUeOpZjDa2e67IO7Myx24Z97HY7aNGEM2xL2CnJIoo7ZZ+5blOOGT6WO2vMRyi1+Z206T0qf92mt/imr9v06zb9DbdpWiJpYSCv1Wwbx23bNkse87uHa/8fvvP+5Il9+njvBF8eEfRYV28vny5vzsl7Dt
iUnIFVSIVnI2yOg+zF0nktRF0oIjy2V/ddEbO+rguv68I3XBdE6EBBehaU7xiers9wly/HEOvDz96RdvHdFJ667w1jBu+conpy+2PmpOTDPyBlo72/SH+eDb9LtI/vfrvRzj9coAhkEHojEvLgR2kscDV43WEQTj0HocG8RRFCGH/R4Pt/mKcTfFit7pFmt9r0r3MkROzWMS1MKNfop/EMmYatIbp0HhVrF6Hs5MRtkkcoDmno8oXPq4OQ8ELH/yx8dpAWYMMQEVT4UmpA3TT4qgWUxdpP8oxFcLGL072x3TI0gTcahjV6T0hDF5mg2iWe3dLezE3PI+ZVDFATV4pDy2UAV7nXlggLPc+3YPoEzmS3MGLjSOQAC11+E2PLxjWGFsLAGPrhsDoIPAhdICx0BBkysZQOaulFkASwhojopp6SBLBETGNeYySEgoSwxNsnRtw4uMsAcZH1os+C8YyNa2i5RMAYU8ptzJ5eTEBumXUrCOBRUjX2hrOa69YKhHSU2T+knKsPdAFjhSjyiZWncjGzKFpOSaLqGmbya7ZdF5PssYy7Tz5ko0VuSrLI5Tt7U7lHs/V7UwVZsr5O94psf6Iv/gZSZJJlhiIW70SLqDqC5PXFkCFL2mq1FpSZIWSRR6yuqxQHDoudCRK9lTt0kQPASZmyVGTUR6Sgi3jND5lbXcclAtDkuFXWUooXc3aCxauMJ6SLcMt0aXXMFUQROAhTb+Q5u5AJWxbQNZTF90iiO/yIIR8N60o9x4O9wqGmYZM54ModI8XRAMT4BBdzn8N0q/dKDrDQJevj8sgP5zvnijfOqgIGraqbUv2mdWOYfaPX77SVqf5VmRMqE82SSZkxsI0bs9e3DZDEVASLZdaz+AbeucD3blKUmERIJUnOz+L331cy/S0y4B2C8qTesbb78n71ghw0nIRTHsFhQNSWpyekkN1C57qgX9zQSVpeSK3xnlCuY1n2yZpv4ISzAGQLkkkPxCjTTrx6lPsTqEAGbFQpSwZyXq+iTjyK4vCOO8j9Z0EbFVutJIWHD94Xb12SXJ7lExmyrVSYenEcZpxvxrXIpXPFRBM/nalU1N1zeJCgzQsK4NACwYWiAVm66zr7mUTuozg8INFFsCm/Fo8VOMc37/27z//uPH2mbBqYMxkdUscV6l/rSPHD5SM289OBdOChhouYww+jxldi3zBHi/n+4umD/pSBQSKFZZZ/+VMm9k+Y2XO42ela4KlYt+2OuiOQCD1u2XbPsDrHJ2bbPjF7kC8n9ou14LU4TNE2eraCgr8wLt+On3ag4G0lBS/ha1DwJZW3pzlKhfci4tfOf1Tm4jeg1KHjS4dSajDya0iHI+XXYYuUPuxrb15+DW1fan5NWGzNUGGXLIHM5K6JH4agL2m0RvBDt5c5+mLy3+6aRtc0S2eeyqn/p6undw/4e1wtd/nGEM3xrJj2LxkXncai81Y0cOXMf7Ho68KRH1vbYDivC8eOp3leF46NJyE3mFSVwz37sQPqheD/lyDIaI8dGAJ163YmCRQQe/MEKpzaVIEK+FBsgQp/b8JABVSVM1DhHYA2UEFXYw5UaDXJAxVsHf5AhUlpemz+O1MIKhw4JDVYhG2olYmEbaDVuIRtiNXpBBVqPUZBhVqdVFAh1uIVVKBFfgL2uSe1oEKtxy68gCpYCui6BxmgQixzAZhd2eHWnXKAKliapGXofWgGFe4mImBvpkEFfiCyQQWfExbV+AY16rQ25aDCrsI6qLAOQzyo0KtxDyq0GvSDCrImA6GCPRAJoYL/I3gIVV11qAgVZk02QgW7nZCwmobdNO0b86TfASFhKwkJYhs6/XYnIy1UhIQsYr5ASCh0fYmT2C72Ai2xXTjZxkwoRLGpVSMnepvIiZ7Zfk5PiIcvERTyABlxEX8IP2EbFijhSgxFh7T/cygKoeUaSUGV53QPLvtRTz7jKTK+gWiKge8FOMsFJiXhPm4cLBoTyUkoDsVKRlRhGoWLnICmTP
4GwAL3tQ1G3MgEiljoN8BUvQgJyNxDORzq6oIJ4Ev3b+rWsrwcu6WSw9Tx3HPLOgsnZA5TA8Z0eVEJ2HlQUxd8GRztfCWGTJ/NcE41HiA+Dx1PnAYAhffj9S+/nP9w/fH85vojhcK4QzeQhtw4jc5Ok4gFmuND5s2tNM0mUjs4+YobcWHop14U8eLXZsJvGxqLPdYUmyekbuhqHA1qfncOeOKGnrB0cU+vdds4k9f2TnWq8Uw7xfHbtVpp2mp3SVMINekX6sHlOdbkRKo2U/6YorKfGC6dkUIaHRzJqhDXe/PinotitmGYXcvuWfnTiMfiZmAY5EgXQjshChgtyz/m5ZGlA8w6Uy3u6KqmeObIo7GAoCWnuvgZddmpzs5O9ejsNsAw/enXG6nm3Fqo58mQVtcuv8V9y6//A4TTh8qRPwAA", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["9746:3FAC:7DC1C0:102C315:5B605841"], "ETag": ["W/\"0962ba2945f8ad21491ca6d3038d3bd3\""], "Date": ["Tue, 31 Jul 2018 12:38:25 GMT"], "X-RateLimit-Remaining": ["4994"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-OAuth-Scopes": ["public_repo, read:user"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.286925"], "Vary": ["Accept, Authorization, Cookie, X-GitHub-OTP"], "X-RateLimit-Limit": ["5000"], "Cache-Control": ["private, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Accepted-OAuth-Scopes": [""], "X-RateLimit-Reset": ["1533044057"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/requested_reviewers"}, "recorded_at": "2018-07-31T12:38:26"}], "recorded_with": "betamax/0.8.1"} \ No newline at end of file diff --git 
a/tests/cassettes/PullRequest_review_requests.json b/tests/cassettes/PullRequest_review_requests.json new file mode 100644 index 00000000..606e37cd --- /dev/null +++ b/tests/cassettes/PullRequest_review_requests.json @@ -0,0 +1,1 @@ +{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"]}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+1cbXPiRhL+KyruQ+rqDHoDgymvc052s/FVbG82vrutPV9RgzSAvEJSJGEbU/vf7+kZCSTMYJCc7H3gg20kpp/peeuZ7qfHi8Ys9hv9xiRNo6Sv6yzyWmMvncyGLSec6jGPwkRPvPGU3XvxLLHauvzWbkVzPZr5fqL3unbjqOG5jb5ltNtG57jdPWoEocsH9K5x+fbd47X/gzl8//jb508/mZ8/XRmXd+fG1dtz8+ruV5KdpFN/UNajoMO22rPKXW80qg7QInGoEbHUmdSAEfLUFUky42s4e/WrAMg7NphNhzxu9NHNR40kZSlHp4YRD1CTHzpfODp5xPyEHzVSL/Xp23PX1ZJZFIVxqo3CWIv5vccf8Od3wKYJBGcJQS4AMPYCSNwxB9/FHs9G0u6ZVs8oD+Ovx//6dOU7d5fty5uL9uX5mzcozO5ZyuL1xoqXiZXNJKrMCYOUB6mYVDNdwn9//6YNiHGcgYjpQsptm5EEluhFfbfPnmLJUej74QPk1/UtT/q1KvSlGHSTn71gXAUCYgs9TCccHYZmfKXGexiQ/bCEyALrMEmxwggEsyKOubsfTCYEhR4C6LIQa12gzYaJE3tR6oXBnqqVRAEVxmMWeE+sAhREaaIKA7Rfw4QIRPk9JtyeslJmoUexd8+cOXVHzB3u3aN3q+CtCQMunUe0RP9JCxB97aV8wNwpLUKxiL8eNYahO0eJH/0w4Yn2l26vq93GtwH9/DjDOAepP9emsBGYThomk+ZhaWERUSdrKS3wI23IHYYppl1oDyxItTTE4nfC2KXyU204z20BQbDcPpD4xEu0ex7PtQ8ftb72V6joxBw2xx2wFEpZhtlrGt2m1b0xzb5t9s3OZ5SZRe7zMrZ5Y1p9u9u3TqiMQ+2RMAH2jaPGlMfj5y8GsBBTLx0kE4b6eidD69hq905szo9Hxmg4ch3DNbrM7RqO6Y64yU3HtGkTYeiQccDRuRI+f04a/f/8l4ZRGD9UKK0hrAC+WFnA4i4DNLGdtY2erTCCF8bl2/HTDkbQVhpBCV/DCK6pvN0OrhXeyxSWduDK1nADSh2DuHYsqGETS0ivZxbLsEWjivm1t2Usoe1rHEvC+9vHkvjrmMg1jUomdgcrWVrQKWfTbJX7bMj97PPU87Hgw2BpEaRleWFH2PHEq2dg0FXaE2G3Xt5w9oEntGf42/fCXeBzRRe6PFrSJvcayq9OraJzMu3pxIqdqFav5xg6TGbH7BrHo3Z31Dat9tCxHGto9VyXdbA/OHbbsI/dbgctmnAGEw77TjOie
MLtM9dtyjHDn+WJOOYjlNr8ndyL9qj8cLze4lMejteH4/U3PF6TiSTDQCc8s20ct23bXDvkvXu49v/hO+9Pntinj/dO8OURwQrr6u3l0+XNOXm9AZvSIX4VCsG7EQ61g+yLpdNZiJbQpk+e1sHtVsSaDnbhYBe+oV0QLj8F17Jg2nZ3SrHCXb4cQ9iHn70j7eK7KTxs3xvGDF41RePIXY+Zk5Lv/YBQq/b+Iv15Nvwu0T6+++1GO/9wgSKQQcgMEQwP5yiNBa4GbzkMwqnnwKWft8izD+MvGnz2D/N0gj9Wq3uk2a02/eocCRG7dUyGCeUa/TSeIUK4NbQmD4+KlhHKToe4TfIIoUEaunzh8+ogJLzQ8TsLezkI57FhiMhH+FJIT900nFULKIvSI52MhXOxy6F7Y7ula4LTaBjW6D0hDV1kYHmXONSW9mbH9NyNW/kANXGlOLRcBl4q99oSYaHncVIsn8CZ7OZGbByJHGChy09ibNm4xtBCGBhDPxxWB8EJQhcICx1OhgwIp4NaehEkAZQQ4d3UU5IAlohpzGuMhFCQEJZ4+/iIGwd36SAusl70WTCesXENLZcIGGOKCY3Z04vEwZZVt4IAHpEhsTec1bRbKxDSUQaYQBVVH+gCxgpR8ACVl3KRERAtp+BudQ0z+dLcrotJ83Edd594yMYZuSnIIs139k3lHs3s96YKMpKtTvcKli7RF38DmTnJIkMRi3eiM1UdQfL6YsjAbrRarQVFZghZxP+r6yrFgcNiZwKCpnKHLnIAHFKmLBVM2IgUdOGv+SFzq+u4RACaHLfKWkrxYsxOsO+V8YR0EW4ZLq2OuYIoAgdh6o08ZxcScIsBLaEsvgf55fAjBh4Jsyv1HA/zFQdqGjYZA67cMVIcDYCPT3Ax9zmmbvVeyQEWumRrXR754XznWPHGVVXAIKu6iaIzrRvD7Bu9fqetpOhWZQRFF82SyTrTZxs3Zq9vG0juoCIwllnP4hPyRQp5GpsUJfILUkmS51Xg+e8rmf4WGfCFwfqi3rG2+/X96gU5aDgJpzzCgQFeWx6ekEJ2C53rgjZ1QydpeSG1xntCuY5l2Sels4ETzgKQpAgmPVAmCO3Eq1f5eQIVSIeNKmXJQK7rldeJV1Ec3nEHsf/MaaNiK0tSePngffHKknTkWb6RLttKhakXx2GWq5FxpNJ0rjJIKK8kU6mou+fwIEGbF+TAoQUihwENyMJd19ljErmPIulHogtnU34spgM5xzfv/bvP/+48faZoGhhv6R1SxxXqL3WkeHD5iM38dCAP8FDDhc/hh1HjK7HmWKPFeH+RYOpPGZhfUlhG+ZePMrB/wsyew81O1wK/zLptd9QdgUToccu2e4bVOT4x2/aJ2YP8emC/WAu+PrDGMsFH5OiUKL4Da7xjMs2BNd6YiCiDwzVZY5lOlAf/7a5pdE1zLVdxPfT/dPX07gE/jytzl28M0RzvimH/knqlUs8i/wfDsSnddMMoHwzHwXBQumO9jLzahmM/dkBtCP5/CYKM9tiBIVC3bmeSQAGxN0+gwqlNFaiAX4stUOHvTRiogKpyBiq8V6ANVNDVmAMVWk3yQAVbhz9QYVKYHkeInSkEFQ6OGjVYhG2olYmEbaDVuIRtiNXpBBVqPUZBhVqdVFAh1uIVVKBFfgLzc09qQYVaj114AVWwFNB1DzJAhbjOBWB1ZUnpO8UAVbC0SNeh96EZVLibiIC9mQYV+CuRDSr4nLCoxjeoUae1KQcVdhXWQYX1OsSDCr0a96BCq0E/qCBrMhAq2FciIVTwfwQPoaqrDhWhwqzJRqhgtxMSVtOwm6Z9Y570OyAkbCUhQWxDp9/uZKSFipCQRcwXCAmFri9xEtvFXqAltgsn25gJhSg2tWrkRG8TOdEz28/pCfHyJYJCJpARF/GH8BO2YYESrsRQdEj7P4eiEFqWSAqqPKd7cEmXevIZT5HxDURTDHwvQC4XmJSE+7hxsGhMJCehSIrd5UIHX
cAGNCUsbgAscF+KGSbucYub1EARhn4DTNULzIDMTyivh7q6YAL4tfs3dWtZXmrfUsnr1PH85JZ1FjJkXqcGjOnyohKwc6emLvj+93Fo6rMZ8lTjAfzz0PFENgAovB+vf/nl/Ifrj+c31x/JFcbd14GcyI3T6Ow0iVigOT5k3tzKqdlEaAeZr7jJGoZ+6kURL35sJvy2obHYY02xeULqhq600qDmd16BJ27Wipku7te2bhtn8rrtqU41nmmnSL8t1UrLVrtLmkKoSU+oB5deWZMTqdpM+WOKyn5iuHRGCmmUOJJVIa7l58U9F8VswzC7lt2z8rcRj8WN3jDIkS6EdkIUMFoWf8zLI0oHmDJTLag31RLPDvJoLCDI5FQXP6MuO9XZ2akend0GGKY//Voy1ZzPFup5mkir69Lf5p60vM68JN/FIxvS/2OQO2fMiaIuvlkWGdCCotQDB6km5MVmV6OH4Nolf74yofgnEM8MHrjxKUNaOX7wrwUcFgymIf6nBaRl1ZnbifsuWIWu64nr+HiysZm5SG/JngHjTJA4iRSQEd2dxA3or/8DsJEztiBEAAA=", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Last-Modified": ["Tue, 31 Jul 2018 12:37:29 GMT"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["B580:3FAB:52E7FA:B40A6F:5B605829"], "ETag": ["W/\"01257746bfd27013f4b23d3f1d29c965\""], "Date": ["Tue, 31 Jul 2018 12:38:01 GMT"], "X-RateLimit-Remaining": ["58"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.178221"], "Vary": ["Accept"], "X-RateLimit-Limit": ["60"], "Cache-Control": ["public, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-RateLimit-Reset": ["1533043716"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873"}, "recorded_at": "2018-07-31T12:38:01"}, {"request": {"body": {"string": 
"", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"]}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/requested_reviewers"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA52TUWuDMBDHv0ueSyNtGUOQMejLHhzsYWMwiqR6jTdiIkm0tOJ330VlMF+GPinhfr/755J0rHFgHYu/OqaMRM1i5lBWokXbuN2BbRgWLN4dosd9tGHaFJCFBZYe3x4+Pl9V/v0SpUd5T5+ThIpFK7ywWWMV1ZTe1y7mfFx0+61EXzbn0DE32oP229xUvOGj/qlNQj9pJ8nQhxZmshonzwiH+HwWufSVmmUYWw/IrPhilDJXssxT/9eI/5IUcvxHLVdaiOy48SXQ8GhLfRgEOr881EB1PHzopILH0YlYKBYHmziKddWUqOMWajMIm7PLLdYejV4e8A9NNmOl0HgX62xEO5KEaMujDBTR0NJdXI6PWMdri63Ib2E0FnLAloa9UjnjyehvNdA7eKdLEUaPHjJRVOGhXoRy0J+oBEQVnvCp/wEtr3r+zwMAAA==", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["B580:3FAB:52E818:B40A9B:5B605829"], "ETag": ["W/\"ef248d21c37e005a30a4c5e05b46f3fd\""], "Date": ["Tue, 31 Jul 2018 12:38:02 GMT"], "X-RateLimit-Remaining": ["57"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.069502"], "Vary": ["Accept"], "X-RateLimit-Limit": ["60"], "Cache-Control": ["public, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": 
["application/json; charset=utf-8"], "X-RateLimit-Reset": ["1533043716"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/873/requested_reviewers"}, "recorded_at": "2018-07-31T12:38:02"}], "recorded_with": "betamax/0.8.1"} \ No newline at end of file diff --git a/tests/integration/test_pulls.py b/tests/integration/test_pulls.py index c794f582..8a19825c 100644 --- a/tests/integration/test_pulls.py +++ b/tests/integration/test_pulls.py @@ -58,6 +58,15 @@ class TestPullRequest(IntegrationHelper): ) assert isinstance(comment, github3.pulls.ReviewComment) + def test_create_review_requests(self): + """Show that a user can create review requests on a PR.""" + self.token_login() + cassette_name = self.cassette_name('create_review_requests') + with self.recorder.use_cassette(cassette_name): + p = self.get_pull_request(num=873) + pull_request = p.create_review_requests(reviewers=['sigmavirus24']) + assert isinstance(pull_request, github3.pulls.ShortPullRequest) + def test_create_review(self): """Verify the request to create a pending review on a PR.""" self.token_login() @@ -70,6 +79,14 @@ class TestPullRequest(IntegrationHelper): ) assert isinstance(comment, github3.pulls.PullReview) + def test_delete_review_requests(self): + """Show that a user can delete review requests on a PR.""" + self.token_login() + cassette_name = self.cassette_name('delete_review_requests') + with self.recorder.use_cassette(cassette_name): + p = self.get_pull_request(num=873) + assert p.delete_review_requests(reviewers=['sigmavirus24']) is True + def test_diff(self): """Show that one can retrieve a bytestring diff of a PR.""" cassette_name = self.cassette_name('diff') @@ -148,6 +165,14 @@ class TestPullRequest(IntegrationHelper): for comment in p.review_comments(): assert isinstance(comment, github3.pulls.ReviewComment) + def test_review_requests(self): + """Show that one can retrieve the review requests of a PR.""" + cassette_name = 
self.cassette_name('review_requests') + with self.recorder.use_cassette(cassette_name): + p = self.get_pull_request(num=873) + review_requests = p.review_requests() + assert isinstance(review_requests, github3.pulls.ReviewRequests) + def test_update(self): """Show that one can update an open Pull Request.""" self.basic_login() diff --git a/tests/unit/test_pulls.py b/tests/unit/test_pulls.py index c4eb62d8..52341410 100644 --- a/tests/unit/test_pulls.py +++ b/tests/unit/test_pulls.py @@ -65,6 +65,15 @@ class TestPullRequest(helper.UnitHelper): } ) + def test_create_review_requests(self): + """Verify the request to ask for reviews on a PR.""" + self.instance.create_review_requests(reviewers=['sigmavirus24']) + + self.session.post.assert_called_once_with( + url_for('requested_reviewers'), + '{"reviewers": ["sigmavirus24"]}' + ) + def test_create_review(self): """Verify the request to create a review on a PR.""" self.instance.create_review('body', 'sha', 'APPROVED') @@ -79,6 +88,15 @@ class TestPullRequest(helper.UnitHelper): } ) + def test_delete_review_requests(self): + """Verify the request to cancel review requests on a PR.""" + self.instance.delete_review_requests(reviewers=['sigmavirus24']) + + self.session.delete.assert_called_once_with( + url_for('requested_reviewers'), + data='{"reviewers": ["sigmavirus24"]}' + ) + def test_diff(self): """Show that a user can request the diff of a Pull Request.""" self.instance.diff() @@ -143,6 +161,14 @@ class TestPullRequest(helper.UnitHelper): } ) + def test_review_requests(self): + """Verify the request to fetch the review requests from a PR.""" + self.instance.review_requests() + + self.session.get.assert_called_once_with( + url_for('requested_reviewers') + ) + def test_update(self): """Show that a user can update a Pull Request.""" self.instance.update('my new title', @@ -202,6 +228,14 @@ class TestPullRequestRequiresAuthentication( with pytest.raises(GitHubError): self.instance.create_review_comment('', '', '', 1) + def 
test_create_review_requests(self): + """Show that you must be authenticated to ask for reviews.""" + self.assert_requires_auth(self.instance.create_review_requests) + + def test_delete_review_requests(self): + """Show that you must be authenticated to cancel review requests.""" + self.assert_requires_auth(self.instance.delete_review_requests) + def test_merge(self): """Show that you must be authenticated to merge a Pull Request.""" with pytest.raises(GitHubError):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist", "betamax", "betamax_matchers" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 betamax==0.8.1 betamax-matchers==0.4.0 certifi==2021.5.30 charset-normalizer==2.0.12 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 -e git+https://github.com/sigmavirus24/github3.py.git@6824ebce0059fce75bace08bb4cfb37e2329a2c7#egg=github3.py idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 mock==1.0.1 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 requests==2.27.1 requests-toolbelt==1.0.0 six==1.17.0 swebench-matterhorn @ file:///swebench_matterhorn toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 uritemplate==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 zipp==3.6.0
name: github3.py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - betamax==0.8.1 - betamax-matchers==0.4.0 - charset-normalizer==2.0.12 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - mock==1.0.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - requests==2.27.1 - requests-toolbelt==1.0.0 - six==1.17.0 - swebench-matterhorn==0.0.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - uritemplate==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - wheel==0.21.0 - zipp==3.6.0 prefix: /opt/conda/envs/github3.py
[ "tests/integration/test_pulls.py::TestPullRequest::test_create_review_requests", "tests/integration/test_pulls.py::TestPullRequest::test_delete_review_requests", "tests/integration/test_pulls.py::TestPullRequest::test_review_requests", "tests/unit/test_pulls.py::TestPullRequest::test_create_review_requests", "tests/unit/test_pulls.py::TestPullRequest::test_delete_review_requests", "tests/unit/test_pulls.py::TestPullRequest::test_review_requests", "tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_create_review_requests", "tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_delete_review_requests" ]
[]
[ "tests/integration/test_pulls.py::TestPullRequest::test_close", "tests/integration/test_pulls.py::TestPullRequest::test_commits", "tests/integration/test_pulls.py::TestPullRequest::test_create_comment", "tests/integration/test_pulls.py::TestPullRequest::test_create_review", "tests/integration/test_pulls.py::TestPullRequest::test_create_review_comment", "tests/integration/test_pulls.py::TestPullRequest::test_diff", "tests/integration/test_pulls.py::TestPullRequest::test_files", "tests/integration/test_pulls.py::TestPullRequest::test_is_merged", "tests/integration/test_pulls.py::TestPullRequest::test_issue", "tests/integration/test_pulls.py::TestPullRequest::test_issue_comments", "tests/integration/test_pulls.py::TestPullRequest::test_patch", "tests/integration/test_pulls.py::TestPullRequest::test_pull_reviews", "tests/integration/test_pulls.py::TestPullRequest::test_reopen", "tests/integration/test_pulls.py::TestPullRequest::test_repository", "tests/integration/test_pulls.py::TestPullRequest::test_review_comments", "tests/integration/test_pulls.py::TestPullRequest::test_update", "tests/integration/test_pulls.py::TestPullReview::test_submit", "tests/integration/test_pulls.py::TestReviewComment::test_reply", "tests/integration/test_pulls.py::TestPullFile::test_contents", "tests/unit/test_pulls.py::TestPullRequest::test_attributes", "tests/unit/test_pulls.py::TestPullRequest::test_close", "tests/unit/test_pulls.py::TestPullRequest::test_create_comment", "tests/unit/test_pulls.py::TestPullRequest::test_create_review", "tests/unit/test_pulls.py::TestPullRequest::test_create_review_comment", "tests/unit/test_pulls.py::TestPullRequest::test_diff", "tests/unit/test_pulls.py::TestPullRequest::test_is_merged_request", "tests/unit/test_pulls.py::TestPullRequest::test_issue", "tests/unit/test_pulls.py::TestPullRequest::test_merge", "tests/unit/test_pulls.py::TestPullRequest::test_merge_squash_message", "tests/unit/test_pulls.py::TestPullRequest::test_patch", 
"tests/unit/test_pulls.py::TestPullRequest::test_reopen", "tests/unit/test_pulls.py::TestPullRequest::test_update", "tests/unit/test_pulls.py::TestPullReview::test_submit", "tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_close", "tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_create_review_comment", "tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_merge", "tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_reopen", "tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_update", "tests/unit/test_pulls.py::TestPullRequestIterator::test_commits", "tests/unit/test_pulls.py::TestPullRequestIterator::test_files", "tests/unit/test_pulls.py::TestPullRequestIterator::test_issue_comments", "tests/unit/test_pulls.py::TestPullRequestIterator::test_review_comments", "tests/unit/test_pulls.py::TestPullRequestIterator::test_reviews", "tests/unit/test_pulls.py::TestReviewComment::test_reply", "tests/unit/test_pulls.py::TestReviewComment::test_reply_requires_authentication", "tests/unit/test_pulls.py::TestPullFile::test_contents", "tests/unit/test_pulls.py::TestPullFilePatch::test_contents" ]
[]
BSD 3-Clause "New" or "Revised" License
2,835
[ "src/github3/pulls.py" ]
[ "src/github3/pulls.py" ]
Azure__iotedgedev-211
31d6219f41caf9401f5c49263a963e3a748e69bf
2018-07-27 12:11:04
a22c7b0c59505964c5aeab6f8ff859842783c236
diff --git a/iotedgedev/modules.py b/iotedgedev/modules.py index d36367e..8b058ba 100644 --- a/iotedgedev/modules.py +++ b/iotedgedev/modules.py @@ -84,31 +84,30 @@ class Modules: tags_to_build = set() for module in os.listdir(self.envvars.MODULES_PATH): - if module not in bypass_modules: - module_dir = os.path.join(self.envvars.MODULES_PATH, module) - module_json = Module(self.output, self.utility, os.path.join(module_dir, "module.json")) - for platform in module_json.platforms: - # get the Dockerfile from module.json - dockerfile = os.path.abspath(os.path.join(module_dir, module_json.get_dockerfile_by_platform(platform))) - container_tag = "" if self.envvars.CONTAINER_TAG == "" else "-" + self.envvars.CONTAINER_TAG - tag = "{0}:{1}{2}-{3}".format(module_json.repository, module_json.tag_version, container_tag, platform).lower() - image_tag_map[(module, platform)] = tag - tag_dockerfile_map[tag] = (module, dockerfile) - tag_build_options_map[tag] = module_json.build_options - if len(active_platform) > 0 and (active_platform[0] == "*" or platform in active_platform): - tags_to_build.add(tag) + module_dir = os.path.join(self.envvars.MODULES_PATH, module) + module_json = Module(self.output, self.utility, os.path.join(module_dir, "module.json")) + for platform in module_json.platforms: + # get the Dockerfile from module.json + dockerfile = os.path.abspath(os.path.join(module_dir, module_json.get_dockerfile_by_platform(platform))) + container_tag = "" if self.envvars.CONTAINER_TAG == "" else "-" + self.envvars.CONTAINER_TAG + tag = "{0}:{1}{2}-{3}".format(module_json.repository, module_json.tag_version, container_tag, platform).lower() + image_tag_map[(module, platform)] = tag + tag_dockerfile_map[tag] = (module, dockerfile) + tag_build_options_map[tag] = module_json.build_options + if not self.utility.in_asterisk_list(module, bypass_modules) and self.utility.in_asterisk_list(platform, active_platform): + tags_to_build.add(tag) deployment_manifest = 
DeploymentManifest(self.envvars, self.output, self.utility, self.envvars.DEPLOYMENT_CONFIG_TEMPLATE_FILE, True) modules_to_process = deployment_manifest.get_modules_to_process() replacements = {} for module, platform in modules_to_process: - if module not in bypass_modules: - key = (module, platform) - if key in image_tag_map: - tag = image_tag_map.get(key) + key = (module, platform) + if key in image_tag_map: + tag = image_tag_map.get(key) + replacements["${{MODULES.{0}.{1}}}".format(module, platform)] = tag + if not self.utility.in_asterisk_list(module, bypass_modules): tags_to_build.add(tag) - replacements["${{MODULES.{0}.{1}}}".format(module, platform)] = tag for tag in tags_to_build: if tag in tag_dockerfile_map: diff --git a/iotedgedev/utility.py b/iotedgedev/utility.py index 13fc09b..d55f55f 100644 --- a/iotedgedev/utility.py +++ b/iotedgedev/utility.py @@ -103,6 +103,9 @@ class Utility: def get_active_docker_platform(self): return [platform.strip() for platform in self.envvars.ACTIVE_DOCKER_PLATFORMS.split(",") if platform] + def in_asterisk_list(self, item, asterisk_list): + return len(asterisk_list) > 0 and (asterisk_list[0] == "*" or item in asterisk_list) + def get_modules_in_config(self, moduleType): modules_config = json.load(open(self.envvars.DEPLOYMENT_CONFIG_FILE_PATH))
Image placeholder is not expanded when the module is in BYPASS_MODULES
Azure/iotedgedev
diff --git a/tests/test_utility.py b/tests/test_utility.py index 6786256..8242b6d 100644 --- a/tests/test_utility.py +++ b/tests/test_utility.py @@ -42,3 +42,15 @@ def test_copy_template_expand_env(utility, tmpdir): dest = tmpdir.join("deployment_template_2.dest.json").strpath utility.copy_template(test_file_1, dest, replacements=replacements, expand_env=True) assert_json_file_equal(test_file_2, dest) + + +def test_in_asterisk_list(utility): + assert utility.in_asterisk_list("filtermodule", "pipemodule, filtermodule") + + +def test_in_asterisk_list_empty(utility): + assert not utility.in_asterisk_list("filtermodule", "") + + +def test_in_asterisk_list_asterisk(utility): + assert utility.in_asterisk_list("filtermodule", "*")
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 2 }
0.79
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
adal==1.2.7 applicationinsights==0.11.10 argcomplete==1.12.3 attrs==22.2.0 azure-cli==2.40.0 azure-cli-cloud==2.1.1 azure-cli-command-modules-nspkg==2.0.3 azure-cli-configure==2.0.24 azure-cli-core==2.40.0 azure-cli-extension==0.2.5 azure-cli-iot==0.3.11 azure-cli-nspkg==3.0.4 azure-cli-profile==2.1.5 azure-cli-resource==2.1.16 azure-cli-telemetry==1.0.8 azure-common==1.1.28 azure-core==1.24.2 azure-mgmt-authorization==0.50.0 azure-mgmt-core==1.3.2 azure-mgmt-iothub==0.8.2 azure-mgmt-iothubprovisioningservices==0.2.0 azure-mgmt-managementgroups==0.1.0 azure-mgmt-nspkg==3.0.2 azure-nspkg==3.0.2 bcrypt==4.0.1 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 click==8.0.4 cryptography==39.0.2 docker==5.0.3 fstrings==0.1.0 humanfriendly==10.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/Azure/iotedgedev.git@31d6219f41caf9401f5c49263a963e3a748e69bf#egg=iotedgedev isodate==0.6.1 jmespath==0.10.0 knack==0.10.1 msal==1.18.0b1 msal-extensions==1.0.0 msrest==0.7.1 msrestazure==0.6.4.post1 oauthlib==3.2.2 packaging==21.3 paramiko==2.12.0 pkginfo==1.10.0 pluggy==1.0.0 portalocker==2.7.0 psutil==5.9.8 py==1.11.0 pycparser==2.21 Pygments==2.14.0 PyJWT==2.4.0 PyNaCl==1.5.0 pyOpenSSL==23.2.0 pyparsing==3.1.4 PySocks==1.7.1 pytest==7.0.1 python-dateutil==2.9.0.post0 python-dotenv==0.20.0 PyYAML==6.0.1 requests==2.27.1 requests-oauthlib==2.0.0 six==1.17.0 tabulate==0.8.10 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 websocket-client==1.3.1 zipp==3.6.0
name: iotedgedev channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - adal==1.2.7 - applicationinsights==0.11.10 - argcomplete==1.12.3 - attrs==22.2.0 - azure-cli==2.40.0 - azure-cli-cloud==2.1.1 - azure-cli-command-modules-nspkg==2.0.3 - azure-cli-configure==2.0.24 - azure-cli-core==2.40.0 - azure-cli-extension==0.2.5 - azure-cli-iot==0.3.11 - azure-cli-nspkg==3.0.4 - azure-cli-profile==2.1.5 - azure-cli-resource==2.1.16 - azure-cli-telemetry==1.0.8 - azure-common==1.1.28 - azure-core==1.24.2 - azure-mgmt-authorization==0.50.0 - azure-mgmt-core==1.3.2 - azure-mgmt-iothub==0.8.2 - azure-mgmt-iothubprovisioningservices==0.2.0 - azure-mgmt-managementgroups==0.1.0 - azure-mgmt-nspkg==3.0.2 - azure-nspkg==3.0.2 - bcrypt==4.0.1 - cffi==1.15.1 - charset-normalizer==2.0.12 - click==8.0.4 - cryptography==39.0.2 - docker==5.0.3 - fstrings==0.1.0 - humanfriendly==10.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isodate==0.6.1 - jmespath==0.10.0 - knack==0.10.1 - msal==1.18.0b1 - msal-extensions==1.0.0 - msrest==0.7.1 - msrestazure==0.6.4.post1 - oauthlib==3.2.2 - packaging==21.3 - paramiko==2.12.0 - pkginfo==1.10.0 - pluggy==1.0.0 - portalocker==2.7.0 - psutil==5.9.8 - py==1.11.0 - pycparser==2.21 - pygments==2.14.0 - pyjwt==2.4.0 - pynacl==1.5.0 - pyopenssl==23.2.0 - pyparsing==3.1.4 - pysocks==1.7.1 - pytest==7.0.1 - 
python-dateutil==2.9.0.post0 - python-dotenv==0.20.0 - pyyaml==6.0.1 - requests==2.27.1 - requests-oauthlib==2.0.0 - six==1.17.0 - tabulate==0.8.10 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - websocket-client==1.3.1 - zipp==3.6.0 prefix: /opt/conda/envs/iotedgedev
[ "tests/test_utility.py::test_in_asterisk_list", "tests/test_utility.py::test_in_asterisk_list_empty", "tests/test_utility.py::test_in_asterisk_list_asterisk" ]
[]
[ "tests/test_utility.py::test_copy_template", "tests/test_utility.py::test_copy_template_expand_env" ]
[]
MIT License
2,836
[ "iotedgedev/utility.py", "iotedgedev/modules.py" ]
[ "iotedgedev/utility.py", "iotedgedev/modules.py" ]
smdabdoub__kraken-biom-9
5cd8acea4b742447a2d315aeeba74646cc7c6c4f
2018-07-27 18:19:39
5cd8acea4b742447a2d315aeeba74646cc7c6c4f
diff --git a/.travis.yml b/.travis.yml index 8c3a27c..dd55a5f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,9 +2,9 @@ cache: apt sudo: false language: python python: + - "3.3" - "3.4" - "3.5" - - "3.6" addons: apt: packages: diff --git a/kraken_biom.py b/kraken_biom.py index 728206b..0f7b369 100755 --- a/kraken_biom.py +++ b/kraken_biom.py @@ -6,13 +6,13 @@ Kraken output (http://ccb.jhu.edu/software/kraken/). """ from __future__ import absolute_import, division, print_function -from pathlib import Path import argparse from collections import OrderedDict import csv from datetime import datetime as dt from gzip import open as gzip_open import os.path as osp +import re import sys from textwrap import dedent as twdd @@ -33,7 +33,7 @@ __license__ = "MIT" __url__ = "http://github.com/smdabdoub/kraken-biom" __maintainer__ = "Shareef M. Dabdoub" __email__ = "[email protected]" -__version__ = '1.1.0' +__version__ = '1.0.1' field_names = ["pct_reads", "clade_reads", "taxon_reads", @@ -81,6 +81,31 @@ def tax_fmt(tax_lvl, end): # print(tax) return tax +def parse_tax_lvl(entry, tax_lvl_depth=[]): + """ + Parse a single kraken-report entry and return a dictionary of taxa for its + named ranks. + + :type entry: dict + :param entry: attributes of a single kraken-report row. + :type tax_lvl_depth: list + :param tax_lvl_depth: running record of taxon levels encountered in + previous calls. + """ + # How deep in the hierarchy are we currently? Each two spaces of + # indentation is one level deeper. Also parse the scientific name at this + # level. + depth_and_name = re.match('^( *)(.*)', entry['sci_name']) + depth = len(depth_and_name.group(1))//2 + name = depth_and_name.group(2) + # Remove the previous levels so we're one higher than the level of the new + # taxon. (This also works if we're just starting out or are going deeper.) + del tax_lvl_depth[depth:] + # Append the new taxon. + tax_lvl_depth.append((entry['rank'], name)) + # Create a tax_lvl dict for the named ranks. 
+ tax_lvl = {x[0]: x[1] for x in tax_lvl_depth if x[0] in ranks} + return(tax_lvl) def parse_kraken_report(kdata, max_rank, min_rank): """ @@ -95,8 +120,6 @@ def parse_kraken_report(kdata, max_rank, min_rank): taxa = OrderedDict() # the master collection of read counts (keyed on NCBI ID) counts = OrderedDict() - # running record of the current taxonomic hierarchy - tax_lvl = {} # current rank r = 0 max_rank_idx = ranks.index(max_rank) @@ -105,18 +128,13 @@ def parse_kraken_report(kdata, max_rank, min_rank): for entry in kdata: erank = entry['rank'].strip() # print("erank: "+erank) - - # move back up the taxa tree to the current level - if erank in ranks and ranks.index(erank) < r: - r = ranks.index(erank) - tax_lvl = {r: tax_lvl[r] if r in tax_lvl else '' for r in ranks[:r]} - - # add current rank to running tally of ranks + if erank in ranks: - tax_lvl[erank] = entry["sci_name"].strip() - # print("Recording tax level ({}): {}".format(erank, tax_lvl[erank])) r = ranks.index(erank) + # update running tally of ranks + tax_lvl = parse_tax_lvl(entry) + # record the reads assigned to this taxon level, and record the taxonomy string with the NCBI ID if erank in ranks and min_rank_idx >= ranks.index(entry['rank']) >= max_rank_idx: taxon_reads = int(entry["taxon_reads"]) @@ -313,10 +331,8 @@ def handle_program_options(): parser = argparse.ArgumentParser(description=twdd(descr), formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('kraken_reports', nargs='*', + parser.add_argument('kraken_reports', nargs='+', help="Results files from the kraken-report tool.") - parser.add_argument('-k', '--kraken_reports_fp', metavar="REPORTS_FP", - help="Folder containing kraken reports") parser.add_argument('--max', default="O", choices=ranks[:-1], help="Assigned reads will be recorded only if \ they are at or below max rank. 
Default: O.") @@ -366,16 +382,12 @@ def main(): Defaulting to BIOM 1.0 (JSON).""" print(twdd(msg)) - if ranks.index(args.max) > ranks.index(args.min): + if ranks.index(args.max) >= ranks.index(args.min): msg = "ERROR: Max and Min ranks are out of order: {} < {}" sys.exit(msg.format(args.max, args.min)) - reports = args.kraken_reports - if args.kraken_reports_fp: - reports += [str(p) for p in Path(args.kraken_reports_fp).glob('*')] - # load all kraken-report files and parse them - sample_counts, taxa = process_samples(reports, + sample_counts, taxa = process_samples(args.kraken_reports, max_rank=args.max, min_rank=args.min)
Taxonomic hierarchy incorrect for certain cases We've found that in some edge cases, the taxonomic hierarchy kraken-biom assigns to a given ID is incorrect. It looks like the assumption that a given rank in the Kraken report always falls under the most recent higher rank (for example, for a given "S" entry for species, the closest "P" for phylum in the previous lines in the file) is not always true. This is with kraken-biom 1.0.1 under Python 3.5 (in Anaconda) on Linux. Here's a chunk from an example Kraken report, where I'm specifically looking at 5693 (Trypanosoma cruzi) near the bottom. ``` ... 0.00 1 0 P 3041 Chlorophyta 0.00 1 1 C 75966 Trebouxiophyceae 0.00 2 0 - 556282 Jakobida 0.00 2 0 G 221723 Seculamonas 0.00 2 2 S 221724 Seculamonas ecuadoriensis 0.00 1 0 - 33682 Euglenozoa 0.00 1 0 O 5653 Kinetoplastida 0.00 1 0 F 5654 Trypanosomatidae 0.00 1 0 G 5690 Trypanosoma 0.00 1 0 - 47570 Schizotrypanum 0.00 1 0 S 5693 Trypanosoma cruzi 0.00 1 1 - 353153 Trypanosoma cruzi strain CL Brener ... ``` Looking from ID 5693 on up, in terms of indentation in the last column: Kraken shows taxa up through "O", then an un-ranked taxon (Euglenozoa), and then nothing for a *very* long time until Eukaryota many lines above (not shown). The phylum Chlorophyta and class Trebouxiophyceae do not actually contain Trypanosoma cruzi; they're just the closest previous phylum and class shown above that species in the file. But kraken-biom's output gives this Consensus Lineage for ID 5693: ``` k__Eukaryota; p__Chlorophyta; c__Trebouxiophyceae; o__Kinetoplastida; f__Trypanosomatidae; g__Trypanosoma; s__cruzi ``` The [NCBI Taxonomy Browser](https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=Info&id=5693&lvl=3&p=has_linkout&p=blast_url&p=genome_blast&p=mapview&lin=f&keep=1&srchmode=1&unlock) seems to match what I saw in Kraken, with Kingdom=Eukaryota; Unranked=Euglenozoa; Order=Kinetoplastida; Family=Trypanosomatidae; etc. (No explicit phylum or class listed.) 
I can't say for sure because [the Kraken documentation](https://ccb.jhu.edu/software/kraken/MANUAL.html) doesn't go into detail, but it looks to me like it's the *indentation for the scientific name* that corresponds to the hierarchy and to what rank sits above a given entry, and not necessarily the *rank of the previous taxa listed*. So in my case even though the previous "P" in the report file is Chlorophyta, that group doesn't actually include ID 5693, so we shouldn't have a phylum or class assigned.
smdabdoub/kraken-biom
diff --git a/test/test_parsing.py b/test/test_parsing.py index cc092dd..ac91bd5 100644 --- a/test/test_parsing.py +++ b/test/test_parsing.py @@ -129,6 +129,16 @@ class kraken_biom_Test(unittest.TestCase): 0.00 1 0 - 206037 Human endogenous retroviruses 0.00 1 0 S 45617 Human endogenous retrovirus K 0.00 1 1 - 166122 Human endogenous retrovirus K113 + 0.00 3 1 D 2759 Eukaryota + 0.00 1 0 P 3041 Chlorophyta + 0.00 1 1 C 75966 Trebouxiophyceae + 0.00 1 0 - 33682 Euglenozoa + 0.00 1 0 O 5653 Kinetoplastida + 0.00 1 0 F 5654 Trypanosomatidae + 0.00 1 0 G 5690 Trypanosoma + 0.00 1 0 - 47570 Schizotrypanum + 0.00 1 0 S 5693 Trypanosoma cruzi + 0.00 1 1 - 353153 Trypanosoma cruzi strain CL Brener """))) def run_parse_kraken_report(self, manual, max_rank, min_rank): @@ -169,7 +179,8 @@ class kraken_biom_Test(unittest.TestCase): '37734': 9, '45617': 1, '543': 6, - '562': 1} + '562': 1, + '5693': 1} self.run_parse_kraken_report(manual, max_rank="O", min_rank="S") @@ -198,7 +209,8 @@ class kraken_biom_Test(unittest.TestCase): '37734': 9, '45617': 1, '543': 6, - '562': 1} + '562': 1, + '5693': 1} self.run_parse_kraken_report(manual, max_rank="F", min_rank="S") @@ -226,7 +238,8 @@ class kraken_biom_Test(unittest.TestCase): '374840': 2, '37734': 9, '45617': 1, - '562': 1} + '562': 1, + '5693': 1} self.run_parse_kraken_report(manual, max_rank="G", min_rank="S") @@ -252,7 +265,8 @@ class kraken_biom_Test(unittest.TestCase): '374840': 2, '37734': 9, '45617': 1, - '562': 1} + '562': 1, + '5693': 1} self.run_parse_kraken_report(manual, max_rank="S", min_rank="S") @@ -279,7 +293,9 @@ class kraken_biom_Test(unittest.TestCase): '29465': 2, '48736': 11, '543': 6, - '561': 1} + '561': 1, + '75966': 1, + '5690': 1} self.run_parse_kraken_report(manual, max_rank="C", min_rank="G") @@ -307,7 +323,9 @@ class kraken_biom_Test(unittest.TestCase): "s__Hyposoter fugitivus ichnovirus"], "374840":["k__Viruses", "p__", "c__", "o__", "f__Microviridae", "g__Microvirus", - "s__Enterobacteria phage 
phiX174 sensu lato"] + "s__Enterobacteria phage phiX174 sensu lato"], + "5693":["k__Eukaryota", "p__", "c__", "o__Kinetoplastida", + "f__Trypanosomatidae", "g__Trypanosoma", "s__cruzi"] } _, taxa = kb.parse_kraken_report(self.sample_kraken_rep, @@ -324,4 +342,4 @@ class kraken_biom_Test(unittest.TestCase): if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "numpy>=1.16.0", "pandas>=1.0.0" ], "pre_install": [ "apt-get update", "apt-get install -y gcc libatlas-base-dev" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
biom-format==2.1.16 click==8.1.8 exceptiongroup==1.2.2 h5py==3.13.0 iniconfig==2.1.0 -e git+https://github.com/smdabdoub/kraken-biom.git@5cd8acea4b742447a2d315aeeba74646cc7c6c4f#egg=kraken_biom numpy==2.0.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 scipy==1.13.1 six==1.17.0 tomli==2.2.1 tzdata==2025.2
name: kraken-biom channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - biom-format==2.1.16 - click==8.1.8 - exceptiongroup==1.2.2 - h5py==3.13.0 - iniconfig==2.1.0 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - scipy==1.13.1 - six==1.17.0 - tomli==2.2.1 - tzdata==2025.2 prefix: /opt/conda/envs/kraken-biom
[ "test/test_parsing.py::kraken_biom_Test::test_parse_kraken_report_taxonomy" ]
[]
[ "test/test_parsing.py::kraken_biom_Test::test_parse_kraken_report_C_G", "test/test_parsing.py::kraken_biom_Test::test_parse_kraken_report_F_S", "test/test_parsing.py::kraken_biom_Test::test_parse_kraken_report_G_S", "test/test_parsing.py::kraken_biom_Test::test_parse_kraken_report_O_S", "test/test_parsing.py::kraken_biom_Test::test_parse_kraken_report_S" ]
[]
MIT License
2,837
[ "kraken_biom.py", ".travis.yml" ]
[ "kraken_biom.py", ".travis.yml" ]
J-CPelletier__webcomix-7
25d394314ce26816302e9c878f5cebfb853c16fb
2018-07-27 20:03:35
25d394314ce26816302e9c878f5cebfb853c16fb
diff --git a/webcomix/main.py b/webcomix/main.py index 0053bbb..cba546d 100644 --- a/webcomix/main.py +++ b/webcomix/main.py @@ -56,7 +56,12 @@ def download(name, cbz): default=False, is_flag=True, help="Outputs the comic as a cbz file") -def search(name, start_url, cbz): [email protected]( + "-y", + default=False, + is_flag=True, + help="Assumes 'yes' as an answer to all prompts") +def search(name, start_url, cbz, y): """ Downloads a webcomic using a general XPath """ @@ -67,8 +72,8 @@ def search(name, start_url, cbz): comic.comic_image_selector) print_verification(validation) click.echo( - "Verify that the links above are correct before proceeding.") - if click.confirm("Are you sure you want to proceed?"): + "Verify that the links above are correct.") + if y or click.confirm("Are you sure you want to proceed?"): comic.download(name) if cbz: comic.make_cbz(name, name) @@ -100,7 +105,12 @@ def search(name, start_url, cbz): default=False, is_flag=True, help="Outputs the comic as a cbz file") -def custom(comic_name, start_url, next_page_xpath, image_xpath, cbz): [email protected]( + "-y", + default=False, + is_flag=True, + help="Assumes 'yes' as an answer to all prompts") +def custom(comic_name, start_url, next_page_xpath, image_xpath, cbz, y): """ Downloads a user-defined webcomic """ @@ -109,8 +119,8 @@ def custom(comic_name, start_url, next_page_xpath, image_xpath, cbz): comic.next_page_selector, comic.comic_image_selector) print_verification(validation) - click.echo("Verify that the links above are correct before proceeding.") - if click.confirm("Are you sure you want to proceed?"): + click.echo("Verify that the links above are correct.") + if y or click.confirm("Are you sure you want to proceed?"): comic.download(comic_name) if cbz: comic.make_cbz(comic_name, comic_name)
Custom: Add a -y (yes) option Looking to use this as a replacement for Dosage, as this allows for custom comics. I'd like to run this daily (or every few days) on a number of comics to pull latest comic. The prompting on Custom Comics (are you sure) is a stumbling block to script it. Can you maybe add a -y to custom, for auto-acknowledging?
J-CPelletier/webcomix
diff --git a/webcomix/tests/test_main.py b/webcomix/tests/test_main.py index ec2c718..c2dcf42 100644 --- a/webcomix/tests/test_main.py +++ b/webcomix/tests/test_main.py @@ -100,7 +100,7 @@ def test_custom(monkeypatch): assert result.exit_code == 0 assert result.output.strip() == "\n".join([ "Verified", "Printed", - "Verify that the links above are correct before proceeding.", + "Verify that the links above are correct.", "Are you sure you want to proceed? [y/N]: yes", "foo" ]) @@ -119,7 +119,7 @@ def test_custom_make_cbz(monkeypatch): assert result.exit_code == 0 assert result.output.strip() == "\n".join([ "Verified", "Printed", - "Verify that the links above are correct before proceeding.", + "Verify that the links above are correct.", "Are you sure you want to proceed? [y/N]: y", "foo", ".cbz created" ]) @@ -139,6 +139,6 @@ def test_search(monkeypatch): assert result.exit_code == 0 assert result.output.strip() == "\n".join([ "Verified", "Printed", - "Verify that the links above are correct before proceeding.", + "Verify that the links above are correct.", "Are you sure you want to proceed? [y/N]: y", "foo" ])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
1.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install --editable .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 exceptiongroup==1.2.2 fake-useragent==2.1.0 idna==3.10 importlib_resources==6.5.2 iniconfig==2.1.0 lxml==5.3.1 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 requests==2.32.3 tomli==2.2.1 urllib3==2.3.0 -e git+https://github.com/J-CPelletier/webcomix.git@25d394314ce26816302e9c878f5cebfb853c16fb#egg=webcomix zipp==3.21.0
name: webcomix channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - exceptiongroup==1.2.2 - fake-useragent==2.1.0 - idna==3.10 - importlib-resources==6.5.2 - iniconfig==2.1.0 - lxml==5.3.1 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - requests==2.32.3 - tomli==2.2.1 - urllib3==2.3.0 - zipp==3.21.0 prefix: /opt/conda/envs/webcomix
[ "webcomix/tests/test_main.py::test_custom", "webcomix/tests/test_main.py::test_custom_make_cbz", "webcomix/tests/test_main.py::test_search" ]
[]
[ "webcomix/tests/test_main.py::test_print_verification", "webcomix/tests/test_main.py::test_comics", "webcomix/tests/test_main.py::test_good_download", "webcomix/tests/test_main.py::test_bad_download", "webcomix/tests/test_main.py::test_good_download_makecbz", "webcomix/tests/test_main.py::test_bad_download_make_cbz" ]
[]
MIT License
2,838
[ "webcomix/main.py" ]
[ "webcomix/main.py" ]
CORE-GATECH-GROUP__serpent-tools-220
9429173b3a7a42bad6e4e0b791f8456f99eb606a
2018-07-27 20:20:45
03997bdce0a5adb75cf5796278ea61b799f7b6dc
diff --git a/docs/develop/logging.rst b/docs/develop/logging.rst index 8f36e03..c5c061c 100644 --- a/docs/develop/logging.rst +++ b/docs/develop/logging.rst @@ -31,3 +31,10 @@ or removed in the future. .. autofunction:: serpentTools.messages.willChange + +Custom Handlers +=============== + +.. autoclass:: serpentTools.messages.DictHandler + :show-inheritance: + :no-inherited-members: diff --git a/docs/develop/utils.rst b/docs/develop/utils.rst index 848929c..6d251ab 100644 --- a/docs/develop/utils.rst +++ b/docs/develop/utils.rst @@ -8,3 +8,16 @@ Utilities .. automodule:: serpentTools.utils :members: convertVariableName, linkToWiki, str2vec, splitValsUnc + +.. _dev-testUtils: + +================= +Testing Utilities +================= + +.. autoclass:: serpentTools.tests.utils.LoggerMixin + +.. autoclass:: serpentTools.tests.utils.TestCaseWithLogCapture + :show-inheritance: + :no-inherited-members: + :members: setUp, tearDown, assertMsgInLogs, assertMsgNotInLogs diff --git a/serpentTools/messages.py b/serpentTools/messages.py index b040939..8ef4f26 100644 --- a/serpentTools/messages.py +++ b/serpentTools/messages.py @@ -10,6 +10,7 @@ See Also import functools import warnings import logging +from logging import Handler from logging.config import dictConfig @@ -27,6 +28,10 @@ class MismatchedContainersError(SamplerError): """Attempting to sample from dissimilar containers""" pass +# +# Logger options +# + LOG_OPTS = ['critical', 'error', 'warning', 'info', 'debug'] @@ -46,9 +51,12 @@ loggingConfig = { 'stream': 'ext://sys.stdout' } }, - 'root': { - 'handlers': ['console'], - 'level': logging.WARNING + 'loggers': { + 'serpentTools': { + 'handlers': ['console'], + 'level': logging.WARNING, + 'propagate': False, + }, } } @@ -57,6 +65,35 @@ dictConfig(loggingConfig) __logger__ = logging.getLogger('serpentTools') +def addHandler(handler): + """ + Add a handler to the logger + + Parameters + ---------- + handler: :class:`python.logging.Handler` + Subclass to handle the 
formatting and emitting + of log messages + """ + if not issubclass(handler.__class__, Handler): + raise TypeError("Handler {} is of class {} and does not appear " + "to be a subclass of {}" + .format(handler, handler.__class__, Handler)) + return __logger__.addHandler(handler) + + +def removeHandler(handler): + """ + Remove a handler from the internal logger + + Parameters + ---------- + handler: :class:`python.logging.Handler` + Handler to be removed + """ + return __logger__.removeHandler(handler) + + def debug(message): """Log a debug message.""" __logger__.debug('%s', message) @@ -126,3 +163,44 @@ def _updateFilterAlert(msg, category): warnings.simplefilter('always', category) warnings.warn(msg, category=category, stacklevel=3) warnings.simplefilter('default', category) + + +class DictHandler(Handler): + """ + Handler that stores log messages in a dictionary + + Attributes + ---------- + logMessages: dict + Dictionary of lists where each key is a log level such + as ``'DEBUG'`` or ``'WARNING'``. The list associated + with each key contains all messages called under that + logging level + """ + def __init__(self, level=logging.NOTSET): + Handler.__init__(self, level) + self.logMessages = {} + + def flush(self): + """Clear the log messages dictionary""" + self.logMessages = {} + + def close(self): + """Tidy up before removing from list of handlers""" + self.logMessages = {} + Handler.close(self) + + def emit(self, record): + """ + Store the message in the log messages by level. + + Does no formatting to the record, simply stores + the message in :attr:`logMessages` dictionary + according to the records ``levelname`` + + Anticipates a :class:`logging.LogRecord` object + """ + level = record.levelname + if level not in self.logMessages: + self.logMessages[level] = [] + self.logMessages[level].append(record.getMessage())
Determine and implement a way to test our message/logging systems Working on the comparison feature, I realize we don't have a way to ensure that our logging system is actually logging what we want. This is more relevant for the testing the comparison utilities, and making sure that we are correctly notifying the user about the locations of differences. For example, if we take two identical readers, read from the same file, then the comparison routine should print no bad messages and return `True`. To test more of the internals, we would start tweaking values, both by shape and by contents. Including and/or removing extra values, in the `resdata` dictionary, would further test the internals. ## Possible suggestions 1. Have our loggers work as a LIFO list, where we can stack additional loggers (primary being our existing logger, secondary being this testable variant) and utilize only the last value in the list for logging 1. Swapping out loggers during testing, e.g. in `setUpModule`, switch the logging system to one that captures and stores messages 1. Utilize a custom [`Handler` object](https://docs.python.org/3.6/library/logging.html#handler-objects) object that places messages in dictionaries 1. Subclass Logger and overwrite the [`makeRecord`](https://docs.python.org/3.6/library/logging.html#logging.Logger.makeRecord) method to store data on a dictionary as well
CORE-GATECH-GROUP/serpent-tools
diff --git a/serpentTools/tests/test_depSampler.py b/serpentTools/tests/test_depSampler.py index 2ad166b..dcd2640 100644 --- a/serpentTools/tests/test_depSampler.py +++ b/serpentTools/tests/test_depSampler.py @@ -14,7 +14,7 @@ File Descriptions *. ``bwr_missingT`` is missing the final burnup step """ -import unittest +from unittest import TestCase from six import iteritems from numpy import where, fabs, ndarray @@ -25,21 +25,24 @@ from serpentTools.data import getFile from serpentTools.parsers.depletion import DepletionReader from serpentTools.samplers.depletion import DepletionSampler from serpentTools.tests import computeMeansErrors +from serpentTools.tests.utils import TestCaseWithLogCapture _testFileNames = {'0', '1', 'badInventory', 'longT', 'missingT'} DEP_FILES = {key: getFile('bwr_{}_dep.m'.format(key)) for key in _testFileNames} -class DepletionSamplerFailTester(unittest.TestCase): +class DepletionSamplerFailTester(TestCaseWithLogCapture): def test_badInventory(self): """Verify an error is raised for files with dissimilar isotopics""" self._mismatchedFiles(DEP_FILES['badInventory']) + self.assertMsgInLogs("ERROR", DEP_FILES['badInventory'], partial=True) def test_missingTimeSteps(self): """Verify an error is raised if length of time steps are dissimilar""" self._mismatchedFiles(DEP_FILES['missingT']) + self.assertMsgInLogs("ERROR", DEP_FILES['missingT'], partial=True) def _mismatchedFiles(self, badFilePath, errorType=MismatchedContainersError): @@ -48,7 +51,7 @@ class DepletionSamplerFailTester(unittest.TestCase): DepletionSampler(files) -class DepletedSamplerTester(unittest.TestCase): +class DepletedSamplerTester(TestCase): """ Class that reads two similar files and validates the averaging and uncertainty propagation. 
@@ -107,4 +110,5 @@ class DepletedSamplerTester(unittest.TestCase): if __name__ == '__main__': - unittest.main() + from unittest import main + main() diff --git a/serpentTools/tests/test_detSampler.py b/serpentTools/tests/test_detSampler.py index a49a4fe..bc3daaa 100644 --- a/serpentTools/tests/test_detSampler.py +++ b/serpentTools/tests/test_detSampler.py @@ -21,8 +21,6 @@ File Descriptions tolerance can still be achieved. """ -import unittest - from six import iteritems from numpy import square, sqrt @@ -32,6 +30,7 @@ from serpentTools.messages import MismatchedContainersError from serpentTools.data import getFile from serpentTools.parsers.detector import DetectorReader from serpentTools.samplers.detector import DetectorSampler +from serpentTools.tests.utils import TestCaseWithLogCapture _DET_FILES = { 'bwr0': 'bwr_0', @@ -50,7 +49,7 @@ TOLERANCES = { } -class DetSamplerTester(unittest.TestCase): +class DetSamplerTester(TestCaseWithLogCapture): """ Tester that looks for errors in mismatched detector files and validates the averaging and uncertainty propagation @@ -70,6 +69,7 @@ class DetSamplerTester(unittest.TestCase): def setUp(self): self._checkContents() + TestCaseWithLogCapture.setUp(self) def test_properlyAveraged(self): """Validate the averaging for two unique detector files""" @@ -90,12 +90,16 @@ class DetSamplerTester(unittest.TestCase): files = [getFile(fp) for fp in ['bwr_0_det0.m', 'bwr_noxy_det0.m']] self._raisesMisMatchError(files) + self.assertMsgInLogs("ERROR", "detectors: Parser files", partial=True) def test_differentSizedDetectors(self): """Verify that an error is raised if detector shapes are different""" files = [getFile(fp) for fp in ['bwr_0_det0.m', 'bwr_smallxy_det0.m']] self._raisesMisMatchError(files) + self.assertMsgInLogs( + "ERROR", "shape: Parser files", + partial=True) def _raisesMisMatchError(self, files): with self.assertRaises(MismatchedContainersError): @@ -118,4 +122,5 @@ def _getExpectedAverages(d0, d1): if __name__ == 
'__main__': - unittest.main() + from unittest import main + main() diff --git a/serpentTools/tests/test_messages.py b/serpentTools/tests/test_messages.py new file mode 100644 index 0000000..fabd8e3 --- /dev/null +++ b/serpentTools/tests/test_messages.py @@ -0,0 +1,95 @@ +""" +Test the logging and messaging functions +""" + +from unittest import TestCase +from warnings import catch_warnings + +from serpentTools.messages import ( + deprecated, willChange, + addHandler, removeHandler, + __logger__, + debug, info, warning, error, critical, +) +from serpentTools.settings import rc +from serpentTools.tests.utils import TestCaseWithLogCapture, LoggerMixin + + +LOGGER_FUNCTIONS = [debug, info, warning, error, critical] + + +class DecoratorTester(TestCase): + """Class to test the decorators for warnings.""" + + def test_futureDecorator(self): + """Verify that the future decorator doesn't break""" + + @willChange('This function will be updated in the future, ' + 'but will still exist') + def demoFuture(x, val=5): + return x + val + + with catch_warnings(record=True) as record: + self.assertEqual(7, demoFuture(2)) + self.assertEqual(7, demoFuture(2, 5)) + self.assertEquals(len(record), 2, + 'Did not catch two warnings::willChange') + + def test_deprecatedDecorator(self): + """Verify that the deprecated decorator doesn't break things""" + + @deprecated('this nonexistent function') + def demoFunction(x, val=5): + return x + val + + with catch_warnings(record=True) as record: + self.assertEqual(7, demoFunction(2)) + self.assertEqual(7, demoFunction(2, 5)) + self.assertEquals(len(record), 2, + 'Did not catch two warnings::deprecation') + + +class LoggingTester(TestCaseWithLogCapture): + """ + Class for testing various logging capabilities + """ + + def test_logger(self): + """Test the basic logging functions.""" + searchMessage = "test_logger" + with rc: + rc['verbosity'] = 'debug' + for logFunc in LOGGER_FUNCTIONS: + funcLevel = logFunc.__name__.upper() + logFunc(searchMessage) 
+ self.msgInLogs(funcLevel, searchMessage) + + def test_addRemoveHandlers(self): + """Test that the add/remove handler functions work.""" + with self.assertRaises(TypeError): + addHandler(1) + addHandler(self.handler) + self.assertIn(self.handler, __logger__.handlers, + msg="addHandler did not add the handler") + removeHandler(self.handler) + self.assertNotIn(self.handler, __logger__.handlers, + msg="removeHandler did not remove the handler") + + def test_keyInLogs(self): + """Verify the behavrior of LoggerMixin.msgInLogs""" + message = "look for me" + warning(message) + self.assertMsgInLogs("WARNING", message) + self.assertMsgInLogs("WARNING", message[:5], partial=True) + self.assertMsgNotInLogs("WARNING", "<none>") + self.assertMsgNotInLogs("WARNING", "<none>", partial=True) + with self.assertRaises(KeyError): + self.msgInLogs("DEBUG", message) + with self.assertRaises(AttributeError): + newM = LoggerMixin() + newM.msgInLogs("WARNING", message) + + +if __name__ == '__main__': + from unittest import main + main() diff --git a/serpentTools/tests/test_settings.py b/serpentTools/tests/test_settings.py index 00beffb..b4d60a2 100644 --- a/serpentTools/tests/test_settings.py +++ b/serpentTools/tests/test_settings.py @@ -1,16 +1,15 @@ """Tests for the settings loaders.""" from os import remove -import warnings -import unittest +from unittest import TestCase import yaml import six from serpentTools import settings -from serpentTools.messages import deprecated, willChange +from serpentTools.tests.utils import TestCaseWithLogCapture -class DefaultSettingsTester(unittest.TestCase): +class DefaultSettingsTester(TestCase): """Class to test the functionality of the master loader.""" @classmethod @@ -34,7 +33,7 @@ class DefaultSettingsTester(unittest.TestCase): return self.defaultLoader[setting].default -class RCTester(unittest.TestCase): +class RCTester(TestCase): """Class to test the functionality of the scriptable settings manager.""" @classmethod @@ -107,7 +106,7 @@ class 
RCTester(unittest.TestCase): self.assertSetEqual(expected, actual) -class ConfigLoaderTester(unittest.TestCase): +class ConfigLoaderTester(TestCaseWithLogCapture): """Class to test loading multiple setttings at once, i.e. config files""" @classmethod @@ -168,38 +167,9 @@ class ConfigLoaderTester(unittest.TestCase): badSettings.update(self.nestedSettings) self._writeTestRemoveConfFile(badSettings, self.files['nested'], self.configSettings, False) - - -class MessagingTester(unittest.TestCase): - """Class to test the messaging framework.""" - - def test_futureDecorator(self): - """Verify that the future decorator doesn't break""" - - @willChange('This function will be updated in the future, ' - 'but will still exist') - def demoFuture(x, val=5): - return x + val - - with warnings.catch_warnings(record=True) as record: - self.assertEqual(7, demoFuture(2)) - self.assertEqual(7, demoFuture(2, 5)) - self.assertEquals(len(record), 2, - 'Did not catch two warnings::willChange') - - def test_depreciatedDecorator(self): - """Verify that the depreciated decorator doesn't break things""" - - @deprecated('this nonexistent function') - def demoFunction(x, val=5): - return x + val - - with warnings.catch_warnings(record=True) as record: - self.assertEqual(7, demoFunction(2)) - self.assertEqual(7, demoFunction(2, 5)) - self.assertEquals(len(record), 2, - 'Did not catch two warnings::deprecation') + self.assertMsgInLogs("ERROR", "bad setting", partial=True) if __name__ == '__main__': - unittest.main() + from unittest import main + main() diff --git a/serpentTools/tests/utils.py b/serpentTools/tests/utils.py new file mode 100644 index 0000000..69ee4d5 --- /dev/null +++ b/serpentTools/tests/utils.py @@ -0,0 +1,165 @@ +""" +Utilities to make testing easier +""" + +from unittest import TestCase +from logging import NOTSET + +from serpentTools.messages import ( + DictHandler, __logger__, removeHandler, addHandler, +) + + +class LoggerMixin(object): + """ + Mixin class captures log 
messages + + Attributes + ---------- + handler: :class:`serpentTools.messages.DictHandler` + Logging handler that stores messages in a + :attr:`serpentTools.messages.DictHandler.logMessages` + dictionary according to level. + """ + def __init__(self): + self.__old = [] + self.handler = None + + def attach(self, level=NOTSET): + """ + Attach the :class:`serpentTools.messages.DictHandler` + + Removes all :class:`logging.Handler` objects from the + old logger, and puts them back when :class:`detach` is + called + + Parameters + ---------- + level: int + Initial level to apply to handler + """ + self.handler = DictHandler(level) + self.__old = __logger__.handlers + for handler in self.__old: + removeHandler(handler) + addHandler(self.handler) + + def detach(self): + """Restore the original handers to the main logger""" + if self.handler is None: + raise AttributeError("Handler not set. Possibly not attached.") + removeHandler(self.handler) + for handler in self.__old: + addHandler(handler) + self.handler = None + self.__old = [] + + def msgInLogs(self, level, msg, partial=False): + """ + Determine if the message is contained in the logs + + Parameters + ---------- + level: str + Level under which this message was posted. + Must be a key in the + :attr:`~serpentTools.messages.DictHandler.logMessages` + on the :attr:`handler` for this class + msg: str + Message to be found in the logs. + partial: bool + If this evaluates to true, then search through each + ``message`` in `logMessages` and return ``True`` if + ``msg in message``. Otherwise, look for exact matches + + Returns + ------- + bool: + If the message was found in the logs + + Raises + ------ + KeyError: + If the level was not found in the logs + AttributeError: + If the :attr:`handler` has not been created with :meth:`attach` + """ + if self.handler is None: + raise AttributeError("Handler has not been attached. 
Must run " + "<attach> first") + logs = self.handler.logMessages + if level not in logs: + raise KeyError("Level {} not found in logs. Existing levels:\n{}" + .format(level, list(sorted(logs.keys())))) + if not partial: + return msg in logs[level] + for message in logs[level]: + if msg in message: + return True + return False + + +class TestCaseWithLogCapture(TestCase, LoggerMixin): + """ + Lightly overwritten :class:`unittest.TestCase` that captures logs + + Mix in the :class:`LoggerMixin` to automatically + :meth:`~LoggerMixin.attach` during + :meth:`~unittest.TestCase.setUp` and :meth:`~LoggerMixin.detach` + during :meth:`~unittest.TestCase.tearDown` + + Intended to be subclassed for actual test methods + """ + + def __init__(self, *args, **kwargs): + TestCase.__init__(self, *args, **kwargs) + LoggerMixin.__init__(self) + + def setUp(self): + """ + Method to be called before every individual test. + + Call :meth:`~serpentTools.tests.utils.LoggerMixin.attach` + to capture any log messages that would be presented during testing. + Should be called during any subclassing. + """ + LoggerMixin.attach(self) + + def tearDown(self): + """ + Method to be called immediately after calling and recording test + + Call :meth:`~serpentTools.tests.utils.LoggerMixin.detach` + to reset the module logger to its original state. + Should be called during any subclassing. 
+ """ + LoggerMixin.detach(self) + + def _concatLogs(self, level): + logs = self.handler.logMessages.get(level, []) + return "\n- ".join([str(item) for item in logs]) + + def assertMsgInLogs(self, level, msg, partial=False): + """ + Assert that the message was stored under a given level + + Combines :meth:`LoggerMixin.msgInLogs` with + :meth:`unittest.TestCase.assertTrue` + """ + matchType = "a partial" if partial else "an exact" + failMsg = "Could not find {} match for {} under {}\n{}".format( + matchType, msg, level, self._concatLogs(level)) + self.assertTrue(self.msgInLogs(level, msg, partial), + msg=failMsg) + + def assertMsgNotInLogs(self, level, msg, partial=False): + """ + Assert that the message was not stored under a given level + + Combines :meth:`LoggerMixin.msgInLogs` with + :meth:`unittest.TestCase.assertFalse` + """ + matchType = "a partial" if partial else "an exact" + failMsg = "Found {} match for {} under {} but should not have" + self.assertFalse(self.msgInLogs(level, msg, partial), + msg=failMsg.format(matchType, msg, level))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 3 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements-test.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==3.6.2 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 backcall==0.2.0 bleach==4.1.0 certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 comm==0.1.4 contextvars==2.4 coverage==6.2 cycler==0.11.0 dataclasses==0.8 decorator==5.1.1 defusedxml==0.7.1 entrypoints==0.4 execnet==1.9.0 flake8==5.0.4 idna==3.10 immutables==0.19 importlib-metadata==4.2.0 iniconfig==1.1.1 ipykernel==5.5.6 ipython==7.16.3 ipython-genutils==0.2.0 ipywidgets==7.8.5 jedi==0.17.2 Jinja2==3.0.3 json5==0.9.16 jsonschema==3.2.0 jupyter==1.1.1 jupyter-client==7.1.2 jupyter-console==6.4.3 jupyter-core==4.9.2 jupyter-server==1.13.1 jupyterlab==3.2.9 jupyterlab-pygments==0.1.2 jupyterlab-server==2.10.3 jupyterlab_widgets==1.1.11 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mccabe==0.7.0 mistune==0.8.4 nbclassic==0.3.5 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nest-asyncio==1.6.0 notebook==6.4.10 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 parso==0.7.1 pexpect==4.9.0 pickleshare==0.7.5 Pillow==8.4.0 pluggy==1.0.0 prometheus-client==0.17.1 prompt-toolkit==3.0.36 ptyprocess==0.7.0 py==1.11.0 pycodestyle==2.9.1 pycparser==2.21 pyflakes==2.5.0 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 Send2Trash==1.8.3 -e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@9429173b3a7a42bad6e4e0b791f8456f99eb606a#egg=serpentTools six==1.17.0 sniffio==1.2.0 terminado==0.12.1 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 wcwidth==0.2.13 webencodings==0.5.1 websocket-client==1.3.1 widgetsnbextension==3.6.10 zipp==3.6.0
name: serpent-tools channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==3.6.2 - argon2-cffi==21.3.0 - argon2-cffi-bindings==21.2.0 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - backcall==0.2.0 - bleach==4.1.0 - cffi==1.15.1 - charset-normalizer==2.0.12 - comm==0.1.4 - contextvars==2.4 - coverage==6.2 - cycler==0.11.0 - dataclasses==0.8 - decorator==5.1.1 - defusedxml==0.7.1 - entrypoints==0.4 - execnet==1.9.0 - flake8==5.0.4 - idna==3.10 - immutables==0.19 - importlib-metadata==4.2.0 - iniconfig==1.1.1 - ipykernel==5.5.6 - ipython==7.16.3 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jedi==0.17.2 - jinja2==3.0.3 - json5==0.9.16 - jsonschema==3.2.0 - jupyter==1.1.1 - jupyter-client==7.1.2 - jupyter-console==6.4.3 - jupyter-core==4.9.2 - jupyter-server==1.13.1 - jupyterlab==3.2.9 - jupyterlab-pygments==0.1.2 - jupyterlab-server==2.10.3 - jupyterlab-widgets==1.1.11 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mccabe==0.7.0 - mistune==0.8.4 - nbclassic==0.3.5 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nest-asyncio==1.6.0 - notebook==6.4.10 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - parso==0.7.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pillow==8.4.0 - pluggy==1.0.0 - prometheus-client==0.17.1 - prompt-toolkit==3.0.36 - ptyprocess==0.7.0 - py==1.11.0 - 
pycodestyle==2.9.1 - pycparser==2.21 - pyflakes==2.5.0 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - send2trash==1.8.3 - six==1.17.0 - sniffio==1.2.0 - terminado==0.12.1 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - wcwidth==0.2.13 - webencodings==0.5.1 - websocket-client==1.3.1 - widgetsnbextension==3.6.10 - zipp==3.6.0 prefix: /opt/conda/envs/serpent-tools
[ "serpentTools/tests/test_depSampler.py::DepletionSamplerFailTester::test_badInventory", "serpentTools/tests/test_depSampler.py::DepletionSamplerFailTester::test_missingTimeSteps", "serpentTools/tests/test_depSampler.py::DepletedSamplerTester::test_depSamplerValidCalcs", "serpentTools/tests/test_depSampler.py::DepletedSamplerTester::test_getitem", "serpentTools/tests/test_detSampler.py::DetSamplerTester::test_differentSizedDetectors", "serpentTools/tests/test_detSampler.py::DetSamplerTester::test_getitem", "serpentTools/tests/test_detSampler.py::DetSamplerTester::test_missingDetectors", "serpentTools/tests/test_detSampler.py::DetSamplerTester::test_properlyAveraged", "serpentTools/tests/test_messages.py::DecoratorTester::test_deprecatedDecorator", "serpentTools/tests/test_messages.py::DecoratorTester::test_futureDecorator", "serpentTools/tests/test_messages.py::LoggingTester::test_addRemoveHandlers", "serpentTools/tests/test_messages.py::LoggingTester::test_keyInLogs", "serpentTools/tests/test_messages.py::LoggingTester::test_logger", "serpentTools/tests/test_settings.py::DefaultSettingsTester::test_cannotChangeDefaults", "serpentTools/tests/test_settings.py::DefaultSettingsTester::test_getDefault", "serpentTools/tests/test_settings.py::RCTester::test_expandExtras", "serpentTools/tests/test_settings.py::RCTester::test_failAtBadSetting_options", "serpentTools/tests/test_settings.py::RCTester::test_failAtBadSettings_type", "serpentTools/tests/test_settings.py::RCTester::test_failAtNonexistentSetting", "serpentTools/tests/test_settings.py::RCTester::test_readerWithUpdatedSettings", "serpentTools/tests/test_settings.py::RCTester::test_returnReaderSettings", "serpentTools/tests/test_settings.py::ConfigLoaderTester::test_loadNestedConfig", "serpentTools/tests/test_settings.py::ConfigLoaderTester::test_loadNestedNonStrict", "serpentTools/tests/test_settings.py::ConfigLoaderTester::test_loadSingleLevelConfig" ]
[ "serpentTools/tests/test_settings.py::RCTester::test_fullExtend" ]
[]
[]
MIT License
2,839
[ "docs/develop/utils.rst", "serpentTools/messages.py", "docs/develop/logging.rst" ]
[ "docs/develop/utils.rst", "serpentTools/messages.py", "docs/develop/logging.rst" ]
rabitt__pysox-80
0c87e45ab170b6e08e5ebacc9328c6053db95710
2018-07-27 22:28:10
8a6748d32b6917d5ef920895fbfc734dda21f294
coveralls: [![Coverage Status](https://coveralls.io/builds/18205419/badge)](https://coveralls.io/builds/18205419) Coverage decreased (-0.009%) to 98.746% when pulling **aeb7b9f017a703caef8b3be662624c69b35c3619 on subprocess-fix** into **0c87e45ab170b6e08e5ebacc9328c6053db95710 on master**.
diff --git a/sox/combine.py b/sox/combine.py index 8ec8155..24854a6 100644 --- a/sox/combine.py +++ b/sox/combine.py @@ -11,7 +11,6 @@ from . import file_info from . import core from .log import logger from .core import ENCODING_VALS -from .core import enquote_filepath from .core import is_number from .core import sox from .core import play @@ -90,7 +89,7 @@ class Combiner(Transformer): args.extend(input_args) args.extend(self.output_format) - args.append(enquote_filepath(output_filepath)) + args.append(output_filepath) args.extend(self.effects) status, out, err = sox(args) @@ -305,9 +304,6 @@ class Combiner(Transformer): self.input_format = input_format return self - def splice(self): - raise NotImplementedError - def _validate_file_formats(input_filepath_list, combine_type): '''Validate that combine method can be performed with given files. @@ -436,7 +432,7 @@ def _build_input_args(input_filepath_list, input_format_list): zipped = zip(input_filepath_list, input_format_list) for input_file, input_fmt in zipped: input_args.extend(input_fmt) - input_args.append(enquote_filepath(input_file)) + input_args.append(input_file) return input_args diff --git a/sox/core.py b/sox/core.py index 118cd4a..1832c81 100644 --- a/sox/core.py +++ b/sox/core.py @@ -14,14 +14,6 @@ ENCODING_VALS = [ ] -def enquote_filepath(fpath): - """Wrap a filepath in double-quotes to protect difficult characters. - """ - if ' ' in fpath: - fpath = '"{}"'.format(fpath.strip("'").strip('"')) - return fpath - - def sox(args): '''Pass an argument list to SoX. 
@@ -43,12 +35,10 @@ def sox(args): args[0] = "sox" try: - command = ' '.join(args) - logger.info("Executing: %s", command) + logger.info("Executing: %s", ' '.join(args)) process_handle = subprocess.Popen( - command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - shell=True + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = process_handle.communicate() @@ -85,7 +75,7 @@ def _get_valid_formats(): if NO_SOX: return [] - so = subprocess.check_output('sox -h', shell=True) + so = subprocess.check_output(['sox', '-h']) if type(so) is not str: so = str(so, encoding='UTF-8') so = so.split('\n') @@ -118,14 +108,14 @@ def soxi(filepath, argument): if argument not in SOXI_ARGS: raise ValueError("Invalid argument '{}' to SoXI".format(argument)) - args = ['sox --i'] + args = ['sox', '--i'] args.append("-{}".format(argument)) - args.append(enquote_filepath(filepath)) + args.append(filepath) try: shell_output = subprocess.check_output( - " ".join(args), - shell=True, stderr=subprocess.PIPE + args, + stderr=subprocess.PIPE ) except CalledProcessError as cpe: logger.info("SoXI error message: {}".format(cpe.output)) @@ -181,7 +171,6 @@ def play(args): class SoxiError(Exception): '''Exception to be raised when SoXI exits with non-zero status. ''' - def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) diff --git a/sox/file_info.py b/sox/file_info.py index 9077f1a..7f0356c 100644 --- a/sox/file_info.py +++ b/sox/file_info.py @@ -7,7 +7,6 @@ import os from .core import VALID_FORMATS from .core import soxi from .core import sox -from .core import enquote_filepath def bitrate(input_filepath): @@ -363,7 +362,7 @@ def _stat_call(filepath): Sox output from stderr. 
''' validate_input_file(filepath) - args = ['sox', enquote_filepath(filepath), '-n', 'stat'] + args = ['sox', filepath, '-n', 'stat'] _, _, stat_output = sox(args) return stat_output diff --git a/sox/transform.py b/sox/transform.py index df4609e..de13675 100644 --- a/sox/transform.py +++ b/sox/transform.py @@ -12,7 +12,6 @@ import random import os from .core import ENCODING_VALS -from .core import enquote_filepath from .core import is_number from .core import play from .core import sox @@ -411,11 +410,9 @@ class Transformer(object): ''' file_info.validate_input_file(input_filepath) - input_filepath = enquote_filepath(input_filepath) if output_filepath is not None: file_info.validate_output_file(output_filepath) - output_filepath = enquote_filepath(output_filepath) else: output_filepath = '-n' @@ -1857,7 +1854,7 @@ class Transformer(object): if gain[i] is not None: intermed_args.append("{:f}".format(gain[i])) - effect_args.append('"{}"'.format(' '.join(intermed_args))) + effect_args.append(' '.join(intermed_args)) self.effects.extend(effect_args) self.effects_log.append('mcompand') @@ -1882,15 +1879,17 @@ class Transformer(object): ''' if os.path.isdir(profile_path): - raise ValueError("profile_path {} is a directory, but filename should be specified.") - + raise ValueError( + "profile_path {} is a directory.".format(profile_path)) + if os.path.dirname(profile_path) == '' and profile_path != '': _abs_profile_path = os.path.join(os.getcwd(), profile_path) else: _abs_profile_path = profile_path - + if not os.access(os.path.dirname(_abs_profile_path), os.W_OK): - raise IOError("profile_path {} is not writeable.".format(_abs_profile_path)) + raise IOError( + "profile_path {} is not writeable.".format(_abs_profile_path)) effect_args = ['noiseprof', profile_path] self.build(input_filepath, None, extra_args=effect_args) @@ -1920,7 +1919,8 @@ class Transformer(object): ''' if not os.path.exists(profile_path): - raise IOError("profile_path {} does not 
exist.".format(profile_path)) + raise IOError( + "profile_path {} does not exist.".format(profile_path)) if not is_number(amount) or amount < 0 or amount > 1: raise ValueError("amount must be a number between 0 and 1.") @@ -2208,7 +2208,8 @@ class Transformer(object): >>> tfm.remix(remix_dictionary) ''' - if not (isinstance(remix_dictionary, dict) or remix_dictionary is None): + if not (isinstance(remix_dictionary, dict) or + remix_dictionary is None): raise ValueError("remix_dictionary must be a dictionary or None.") if remix_dictionary is not None: @@ -3118,7 +3119,7 @@ class Transformer(object): ---------- gain : float Interpreted according to the given `gain_type`. - If `gain_type' = 'amplitude', `gain' is a (positive) amplitude ratio. + If `gain_type' = 'amplitude', `gain' is a positive amplitude ratio. If `gain_type' = 'power', `gain' is a power (voltage squared). If `gain_type' = 'db', `gain' is in decibels. gain_type : string, default='amplitude' @@ -3160,7 +3161,7 @@ class Transformer(object): elif gain_type == 'db': effect_args.append('dB') else: - raise ValueError('gain_type must be one of amplitude, power, or db') + raise ValueError('gain_type must be one of amplitude power or db') if limiter_gain is not None: if gain_type in ['amplitude', 'power'] and gain > 1: diff --git a/sox/version.py b/sox/version.py index ad0d588..b718b9b 100644 --- a/sox/version.py +++ b/sox/version.py @@ -3,4 +3,4 @@ """Version info""" short_version = '1.3' -version = '1.3.5' +version = '1.3.6'
Base call to SoX uses shell=True in subprocess call Filenames with wildcards have always behaved inconsistently with pysox. Looking into this more closely, found that - the SoX command arguments being passed to the shell rather than to the binary directly. Thanks @jongwook for finding the source!
rabitt/pysox
diff --git a/tests/test_transform.py b/tests/test_transform.py index 1f7795a..f611ee4 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -2340,18 +2340,17 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.005000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' - '-34.000000,-17.000000,-33.000000,0.000000,0.000000"', + '0.005000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' + '-34.000000,-17.000000,-33.000000,0.000000,0.000000', '1600.000000', - '"0.000625,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' - '-15.000000,-33.000000,0.000000,0.000000"' + '0.000625,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' + '-15.000000,-33.000000,0.000000,0.000000' ] self.assertEqual(expected_args, actual_args) actual_log = tfm.effects_log expected_log = ['mcompand'] self.assertEqual(expected_log, actual_log) - actual_res = tfm.build(INPUT_FILE, OUTPUT_FILE) expected_res = True self.assertEqual(expected_res, actual_res) @@ -2369,8 +2368,8 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.005000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' - '-34.000000,-17.000000,-33.000000,0.000000,0.000000"' + '0.005000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' + '-34.000000,-17.000000,-33.000000,0.000000,0.000000' ] self.assertEqual(expected_args, actual_args) @@ -2390,11 +2389,11 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.005000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' - '-34.000000,-17.000000,-33.000000,0.000000,0.000000"', + '0.005000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' + '-34.000000,-17.000000,-33.000000,0.000000,0.000000', '100.000000', - '"0.000625,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' - '-15.000000,-33.000000,0.000000,0.000000"' + '0.000625,0.012500 
-47.000000,-40.000000,-34.000000,-34.000000,' + '-15.000000,-33.000000,0.000000,0.000000' ] self.assertEqual(expected_args, actual_args) @@ -2419,11 +2418,11 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.500000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' - '-34.000000,-17.000000,-33.000000,0.000000,0.000000"', + '0.500000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' + '-34.000000,-17.000000,-33.000000,0.000000,0.000000', '1600.000000', - '"0.062500,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' - '-15.000000,-33.000000,0.000000,0.000000"' + '0.062500,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' + '-15.000000,-33.000000,0.000000,0.000000' ] self.assertEqual(expected_args, actual_args) @@ -2458,11 +2457,11 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.005000,0.001000 6.000000:-47.000000,-40.000000,-34.000000,' - '-34.000000,-17.000000,-33.000000,0.000000,0.000000"', + '0.005000,0.001000 6.000000:-47.000000,-40.000000,-34.000000,' + '-34.000000,-17.000000,-33.000000,0.000000,0.000000', '1600.000000', - '"0.000625,0.500000 -47.000000,-40.000000,-34.000000,-34.000000,' - '-15.000000,-33.000000,0.000000,0.000000"' + '0.000625,0.500000 -47.000000,-40.000000,-34.000000,-34.000000,' + '-15.000000,-33.000000,0.000000,0.000000' ] self.assertEqual(expected_args, actual_args) @@ -2497,11 +2496,11 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.005000,0.100000 -2.000000:-47.000000,-40.000000,-34.000000,' - '-34.000000,-17.000000,-33.000000,0.000000,0.000000"', + '0.005000,0.100000 -2.000000:-47.000000,-40.000000,-34.000000,' + '-34.000000,-17.000000,-33.000000,0.000000,0.000000', '1600.000000', - '"0.000625,0.012500 -5.000000:-47.000000,-40.000000,-34.000000,' - '-34.000000,-15.000000,-33.000000,0.000000,0.000000"' + '0.000625,0.012500 
-5.000000:-47.000000,-40.000000,-34.000000,' + '-34.000000,-15.000000,-33.000000,0.000000,0.000000' ] self.assertEqual(expected_args, actual_args) @@ -2516,11 +2515,11 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.005000,0.100000 -47.000000,-40.000000,-34.000000,-34.000000,' - '-17.000000,-33.000000,0.000000,0.000000"', + '0.005000,0.100000 -47.000000,-40.000000,-34.000000,-34.000000,' + '-17.000000,-33.000000,0.000000,0.000000', '1600.000000', - '"0.000625,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' - '-15.000000,-33.000000,0.000000,0.000000"' + '0.000625,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' + '-15.000000,-33.000000,0.000000,0.000000' ] self.assertEqual(expected_args, actual_args) @@ -2555,10 +2554,10 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.005000,0.100000 6.000000:-70.000000,-60.000000,-60.000000,' - '-20.000000,-40.000000,-40.000000,0.000000,-4.000000"', + '0.005000,0.100000 6.000000:-70.000000,-60.000000,-60.000000,' + '-20.000000,-40.000000,-40.000000,0.000000,-4.000000', '1600.000000', - '"0.000625,0.012500 -70.000000,-60.000000,0.000000,-4.000000"' + '0.000625,0.012500 -70.000000,-60.000000,0.000000,-4.000000' ] self.assertEqual(expected_args, actual_args) @@ -2622,11 +2621,11 @@ class TestTransformerMcompand(unittest.TestCase): actual_args = tfm.effects expected_args = [ 'mcompand', - '"0.005000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' - '-34.000000,-17.000000,-33.000000,0.000000,0.000000 3.000000"', + '0.005000,0.100000 6.000000:-47.000000,-40.000000,-34.000000,' + '-34.000000,-17.000000,-33.000000,0.000000,0.000000 3.000000', '1600.000000', - '"0.000625,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' - '-15.000000,-33.000000,0.000000,0.000000 -1.000000"' + '0.000625,0.012500 -47.000000,-40.000000,-34.000000,-34.000000,' + '-15.000000,-33.000000,0.000000,0.000000 
-1.000000' ] self.assertEqual(expected_args, actual_args) @@ -2650,7 +2649,7 @@ class TestTransformerMcompand(unittest.TestCase): class TestTransformerNoiseprof(unittest.TestCase): - + def test_default(self): tfm = new_transformer() save_path = os.path.join(os.getcwd(), 'noise.prof') @@ -2673,7 +2672,7 @@ class TestTransformerNoiseprof(unittest.TestCase): tfm = new_transformer() with self.assertRaises(ValueError): tfm.noiseprof(INPUT_FILE, os.getcwd()) - + def test_noise_prof_invalid_write(self): tfm = new_transformer() with self.assertRaises(IOError): @@ -2689,7 +2688,7 @@ class TestTransformerNoiseprof(unittest.TestCase): class TestTransformerNoisered(unittest.TestCase): - + def test_default(self): tfm = new_transformer() tfm.noisered(NOISE_PROF_FILE) @@ -2709,7 +2708,7 @@ class TestTransformerNoisered(unittest.TestCase): actual_res = tfm.build(INPUT_FILE, OUTPUT_FILE) expected_res = True self.assertEqual(expected_res, actual_res) - + def test_noise_prof_valid(self): tfm = new_transformer() tfm.noisered(NOISE_PROF_FILE) @@ -2738,10 +2737,9 @@ class TestTransformerNoisered(unittest.TestCase): ) actual_args = tfm.effects - expected_args = ['noisered', - NOISE_PROF_FILE, - '0.700000' - ] + expected_args = [ + 'noisered', NOISE_PROF_FILE, '0.700000' + ] self.assertEqual(expected_args, actual_args) actual_res = tfm.build(INPUT_FILE, OUTPUT_FILE) @@ -4083,7 +4081,6 @@ class TestTransformerPowerSpectrum(unittest.TestCase): self.assertEqual(expected_last, actual[-1]) - class TestTransformerStats(unittest.TestCase): def test_default(self): @@ -4131,6 +4128,7 @@ class TestTransformerStats(unittest.TestCase): } self.assertEqual(expected, actual) + class TestTransformerSwap(unittest.TestCase): def test_default(self): @@ -4574,7 +4572,7 @@ class TestTransformerVol(unittest.TestCase): actual_res = tfm.build(INPUT_FILE, OUTPUT_FILE) expected_res = True - self.assertEqual(expected_res, actual_res) + self.assertEqual(expected_res, actual_res) def test_limiter_gain_vol_down(self): 
tfm = new_transformer()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 5 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[tests]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y sox" ], "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 execnet==1.9.0 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pep8==1.7.1 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cache==1.0 pytest-cov==4.0.0 pytest-pep8==1.0.6 -e git+https://github.com/rabitt/pysox.git@0c87e45ab170b6e08e5ebacc9328c6053db95710#egg=sox tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: pysox channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - execnet==1.9.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pep8==1.7.1 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cache==1.0 - pytest-cov==4.0.0 - pytest-pep8==1.0.6 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/pysox
[ "tests/test_transform.py::TestTransformerMcompand::test_attack_time_valid", "tests/test_transform.py::TestTransformerMcompand::test_crossover_frequencies_valid", "tests/test_transform.py::TestTransformerMcompand::test_decay_time_valid", "tests/test_transform.py::TestTransformerMcompand::test_default", "tests/test_transform.py::TestTransformerMcompand::test_gain_valid", "tests/test_transform.py::TestTransformerMcompand::test_n_bands_valid", "tests/test_transform.py::TestTransformerMcompand::test_soft_knee_none", "tests/test_transform.py::TestTransformerMcompand::test_soft_knee_valid", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_valid" ]
[ "tests/test_transform.py::TestTransformerNoiseprof::test_noise_prof_invalid_cwd", "tests/test_transform.py::TestTransformerNoiseprof::test_noise_prof_invalid_write", "tests/test_transform.py::TestTransformerNoisered::test_noise_prof_invalid" ]
[ "tests/test_transform.py::TestTransformDefault::test_effects", "tests/test_transform.py::TestTransformDefault::test_effects_log", "tests/test_transform.py::TestTransformDefault::test_globals", "tests/test_transform.py::TestTransformDefault::test_input_format", "tests/test_transform.py::TestTransformDefault::test_output_format", "tests/test_transform.py::TestTransformSetGlobals::test_defaults", "tests/test_transform.py::TestTransformSetGlobals::test_dither", "tests/test_transform.py::TestTransformSetGlobals::test_dither_invalid", "tests/test_transform.py::TestTransformSetGlobals::test_guard", "tests/test_transform.py::TestTransformSetGlobals::test_guard_invalid", "tests/test_transform.py::TestTransformSetGlobals::test_multithread", "tests/test_transform.py::TestTransformSetGlobals::test_multithread_invalid", "tests/test_transform.py::TestTransformSetGlobals::test_replay_gain", "tests/test_transform.py::TestTransformSetGlobals::test_replay_gain_invalid", "tests/test_transform.py::TestTransformSetGlobals::test_verbosity", "tests/test_transform.py::TestTransformSetGlobals::test_verbosity_invalid", "tests/test_transform.py::TestTransformSetInputFormat::test_bits", "tests/test_transform.py::TestTransformSetInputFormat::test_bits_invalid", "tests/test_transform.py::TestTransformSetInputFormat::test_bits_invalid2", "tests/test_transform.py::TestTransformSetInputFormat::test_channels", "tests/test_transform.py::TestTransformSetInputFormat::test_channels_invalid", "tests/test_transform.py::TestTransformSetInputFormat::test_channels_invalid2", "tests/test_transform.py::TestTransformSetInputFormat::test_defaults", "tests/test_transform.py::TestTransformSetInputFormat::test_encoding", "tests/test_transform.py::TestTransformSetInputFormat::test_encoding_invalid", "tests/test_transform.py::TestTransformSetInputFormat::test_file_type", "tests/test_transform.py::TestTransformSetInputFormat::test_file_type_invalid", 
"tests/test_transform.py::TestTransformSetInputFormat::test_ignore_length", "tests/test_transform.py::TestTransformSetInputFormat::test_ignore_length_invalid", "tests/test_transform.py::TestTransformSetInputFormat::test_rate", "tests/test_transform.py::TestTransformSetInputFormat::test_rate_invalid", "tests/test_transform.py::TestTransformSetInputFormat::test_rate_invalid2", "tests/test_transform.py::TestTransformSetInputFormat::test_rate_scinotation", "tests/test_transform.py::TestTransformSetOutputFormat::test_append_comments", "tests/test_transform.py::TestTransformSetOutputFormat::test_append_comments_invalid", "tests/test_transform.py::TestTransformSetOutputFormat::test_bits", "tests/test_transform.py::TestTransformSetOutputFormat::test_bits_invalid", "tests/test_transform.py::TestTransformSetOutputFormat::test_bits_invalid2", "tests/test_transform.py::TestTransformSetOutputFormat::test_channels", "tests/test_transform.py::TestTransformSetOutputFormat::test_channels_invalid", "tests/test_transform.py::TestTransformSetOutputFormat::test_channels_invalid2", "tests/test_transform.py::TestTransformSetOutputFormat::test_comments", "tests/test_transform.py::TestTransformSetOutputFormat::test_comments_invalid", "tests/test_transform.py::TestTransformSetOutputFormat::test_defaults", "tests/test_transform.py::TestTransformSetOutputFormat::test_encoding", "tests/test_transform.py::TestTransformSetOutputFormat::test_encoding_invalid", "tests/test_transform.py::TestTransformSetOutputFormat::test_file_type", "tests/test_transform.py::TestTransformSetOutputFormat::test_file_type_invalid", "tests/test_transform.py::TestTransformSetOutputFormat::test_file_type_null_output", "tests/test_transform.py::TestTransformSetOutputFormat::test_rate", "tests/test_transform.py::TestTransformSetOutputFormat::test_rate_invalid", "tests/test_transform.py::TestTransformSetOutputFormat::test_rate_invalid2", "tests/test_transform.py::TestTransformSetOutputFormat::test_rate_scinotation", 
"tests/test_transform.py::TestTransformerBuild::test_extra_arg", "tests/test_transform.py::TestTransformerBuild::test_extra_args_invalid", "tests/test_transform.py::TestTransformerBuild::test_failed_sox", "tests/test_transform.py::TestTransformerBuild::test_input_output_equal", "tests/test_transform.py::TestTransformerBuild::test_invalid", "tests/test_transform.py::TestTransformerBuild::test_null_output", "tests/test_transform.py::TestTransformerBuild::test_return_outputs", "tests/test_transform.py::TestTransformerBuild::test_return_outputs_err", "tests/test_transform.py::TestTransformerBuild::test_valid", "tests/test_transform.py::TestTransformerBuild::test_valid_spacey", "tests/test_transform.py::TestTransformerClearEffects::test_clear", "tests/test_transform.py::TestTransformerPreview::test_valid", "tests/test_transform.py::TestTransformerAllpass::test_default", "tests/test_transform.py::TestTransformerAllpass::test_frequency_invalid", "tests/test_transform.py::TestTransformerAllpass::test_width_q_invalid", "tests/test_transform.py::TestTransformerBandpass::test_constant_skirt", "tests/test_transform.py::TestTransformerBandpass::test_constant_skirt_invalid", "tests/test_transform.py::TestTransformerBandpass::test_default", "tests/test_transform.py::TestTransformerBandpass::test_frequency_invalid", "tests/test_transform.py::TestTransformerBandpass::test_width_q_invalid", "tests/test_transform.py::TestTransformerBandreject::test_default", "tests/test_transform.py::TestTransformerBandreject::test_frequency_invalid", "tests/test_transform.py::TestTransformerBandreject::test_width_q_invalid", "tests/test_transform.py::TestTransformerBass::test_default", "tests/test_transform.py::TestTransformerBass::test_frequency_invalid", "tests/test_transform.py::TestTransformerBass::test_gain_db_invalid", "tests/test_transform.py::TestTransformerBass::test_slope_invalid", "tests/test_transform.py::TestTransformerBend::test_cents_invalid_len", 
"tests/test_transform.py::TestTransformerBend::test_cents_invalid_nonlist", "tests/test_transform.py::TestTransformerBend::test_cents_invalid_vals", "tests/test_transform.py::TestTransformerBend::test_default", "tests/test_transform.py::TestTransformerBend::test_end_times_invalid_len", "tests/test_transform.py::TestTransformerBend::test_end_times_invalid_nonlist", "tests/test_transform.py::TestTransformerBend::test_end_times_invalid_order", "tests/test_transform.py::TestTransformerBend::test_end_times_invalid_vals", "tests/test_transform.py::TestTransformerBend::test_frame_rate_invalid", "tests/test_transform.py::TestTransformerBend::test_frame_rate_valid", "tests/test_transform.py::TestTransformerBend::test_n_bends_invalid", "tests/test_transform.py::TestTransformerBend::test_overlapping_intervals", "tests/test_transform.py::TestTransformerBend::test_oversample_rate_invalid", "tests/test_transform.py::TestTransformerBend::test_oversample_rate_valid", "tests/test_transform.py::TestTransformerBend::test_start_greater_end", "tests/test_transform.py::TestTransformerBend::test_start_times_invalid_len", "tests/test_transform.py::TestTransformerBend::test_start_times_invalid_nonlist", "tests/test_transform.py::TestTransformerBend::test_start_times_invalid_order", "tests/test_transform.py::TestTransformerBend::test_start_times_invalid_vals", "tests/test_transform.py::TestTransformerBiquad::test_a_non_num", "tests/test_transform.py::TestTransformerBiquad::test_a_nonlist", "tests/test_transform.py::TestTransformerBiquad::test_a_wrong_len", "tests/test_transform.py::TestTransformerBiquad::test_b_non_num", "tests/test_transform.py::TestTransformerBiquad::test_b_nonlist", "tests/test_transform.py::TestTransformerBiquad::test_b_wrong_len", "tests/test_transform.py::TestTransformerBiquad::test_default", "tests/test_transform.py::TestTransformerChannels::test_default", "tests/test_transform.py::TestTransformerChannels::test_invalid_nchannels", 
"tests/test_transform.py::TestTransformerChorus::test_default", "tests/test_transform.py::TestTransformerChorus::test_explicit_args", "tests/test_transform.py::TestTransformerChorus::test_invalid_decays", "tests/test_transform.py::TestTransformerChorus::test_invalid_decays_vals", "tests/test_transform.py::TestTransformerChorus::test_invalid_decays_wronglen", "tests/test_transform.py::TestTransformerChorus::test_invalid_delays", "tests/test_transform.py::TestTransformerChorus::test_invalid_delays_vals", "tests/test_transform.py::TestTransformerChorus::test_invalid_delays_wronglen", "tests/test_transform.py::TestTransformerChorus::test_invalid_depths", "tests/test_transform.py::TestTransformerChorus::test_invalid_depths_vals", "tests/test_transform.py::TestTransformerChorus::test_invalid_depths_wronglen", "tests/test_transform.py::TestTransformerChorus::test_invalid_gain_in", "tests/test_transform.py::TestTransformerChorus::test_invalid_gain_out", "tests/test_transform.py::TestTransformerChorus::test_invalid_n_voices", "tests/test_transform.py::TestTransformerChorus::test_invalid_shapes", "tests/test_transform.py::TestTransformerChorus::test_invalid_shapes_vals", "tests/test_transform.py::TestTransformerChorus::test_invalid_shapes_wronglen", "tests/test_transform.py::TestTransformerChorus::test_invalid_speeds", "tests/test_transform.py::TestTransformerChorus::test_invalid_speeds_vals", "tests/test_transform.py::TestTransformerChorus::test_invalid_speeds_wronglen", "tests/test_transform.py::TestTransformerContrast::test_default", "tests/test_transform.py::TestTransformerContrast::test_invalid_amount_big", "tests/test_transform.py::TestTransformerContrast::test_invalid_amount_neg", "tests/test_transform.py::TestTransformerContrast::test_invalid_amount_nonnum", "tests/test_transform.py::TestTransformerCompand::test_attack_bigger_decay", "tests/test_transform.py::TestTransformerCompand::test_attack_time_invalid_neg", 
"tests/test_transform.py::TestTransformerCompand::test_attack_time_invalid_nonnum", "tests/test_transform.py::TestTransformerCompand::test_attack_time_valid", "tests/test_transform.py::TestTransformerCompand::test_decay_time_invalid_neg", "tests/test_transform.py::TestTransformerCompand::test_decay_time_invalid_nonnum", "tests/test_transform.py::TestTransformerCompand::test_decay_time_valid", "tests/test_transform.py::TestTransformerCompand::test_default", "tests/test_transform.py::TestTransformerCompand::test_soft_knee_invalid", "tests/test_transform.py::TestTransformerCompand::test_soft_knee_none", "tests/test_transform.py::TestTransformerCompand::test_soft_knee_valid", "tests/test_transform.py::TestTransformerCompand::test_tf_points_empty", "tests/test_transform.py::TestTransformerCompand::test_tf_points_nonlist", "tests/test_transform.py::TestTransformerCompand::test_tf_points_nontuples", "tests/test_transform.py::TestTransformerCompand::test_tf_points_tup_dups", "tests/test_transform.py::TestTransformerCompand::test_tf_points_tup_len", "tests/test_transform.py::TestTransformerCompand::test_tf_points_tup_nonnum", "tests/test_transform.py::TestTransformerCompand::test_tf_points_tup_nonnum2", "tests/test_transform.py::TestTransformerCompand::test_tf_points_tup_positive", "tests/test_transform.py::TestTransformerCompand::test_tf_points_valid", "tests/test_transform.py::TestTransformerConvert::test_bitdepth_invalid", "tests/test_transform.py::TestTransformerConvert::test_bitdepth_valid", "tests/test_transform.py::TestTransformerConvert::test_channels_invalid1", "tests/test_transform.py::TestTransformerConvert::test_channels_invalid2", "tests/test_transform.py::TestTransformerConvert::test_channels_valid", "tests/test_transform.py::TestTransformerConvert::test_default", "tests/test_transform.py::TestTransformerConvert::test_samplerate_invalid", "tests/test_transform.py::TestTransformerConvert::test_samplerate_valid", 
"tests/test_transform.py::TestTransformerDcshift::test_default", "tests/test_transform.py::TestTransformerDcshift::test_invalid_shift_big", "tests/test_transform.py::TestTransformerDcshift::test_invalid_shift_neg", "tests/test_transform.py::TestTransformerDcshift::test_invalid_shift_nonnum", "tests/test_transform.py::TestTransformerDeemph::test_default", "tests/test_transform.py::TestTransformerDelay::test_default", "tests/test_transform.py::TestTransformerDelay::test_default_three_channel", "tests/test_transform.py::TestTransformerDelay::test_invalid_position_type", "tests/test_transform.py::TestTransformerDelay::test_invalid_position_vals", "tests/test_transform.py::TestTransformerDownsample::test_default", "tests/test_transform.py::TestTransformerDownsample::test_invalid_factor_neg", "tests/test_transform.py::TestTransformerDownsample::test_invalid_factor_nonnum", "tests/test_transform.py::TestTransformerEarwax::test_default", "tests/test_transform.py::TestTransformerEcho::test_decays_invalid_len", "tests/test_transform.py::TestTransformerEcho::test_decays_invalid_type", "tests/test_transform.py::TestTransformerEcho::test_decays_invalid_vals", "tests/test_transform.py::TestTransformerEcho::test_decays_valid", "tests/test_transform.py::TestTransformerEcho::test_default", "tests/test_transform.py::TestTransformerEcho::test_delays_invalid_len", "tests/test_transform.py::TestTransformerEcho::test_delays_invalid_type", "tests/test_transform.py::TestTransformerEcho::test_delays_invalid_vals", "tests/test_transform.py::TestTransformerEcho::test_delays_valid", "tests/test_transform.py::TestTransformerEcho::test_gain_in_invalid", "tests/test_transform.py::TestTransformerEcho::test_gain_in_valid", "tests/test_transform.py::TestTransformerEcho::test_gain_out_invalid", "tests/test_transform.py::TestTransformerEcho::test_gain_out_valid", "tests/test_transform.py::TestTransformerEcho::test_n_echos_invalid", "tests/test_transform.py::TestTransformerEcho::test_n_echos_valid", 
"tests/test_transform.py::TestTransformerEchos::test_decays_invalid_len", "tests/test_transform.py::TestTransformerEchos::test_decays_invalid_type", "tests/test_transform.py::TestTransformerEchos::test_decays_invalid_vals", "tests/test_transform.py::TestTransformerEchos::test_decays_valid", "tests/test_transform.py::TestTransformerEchos::test_default", "tests/test_transform.py::TestTransformerEchos::test_delays_invalid_len", "tests/test_transform.py::TestTransformerEchos::test_delays_invalid_type", "tests/test_transform.py::TestTransformerEchos::test_delays_invalid_vals", "tests/test_transform.py::TestTransformerEchos::test_delays_valid", "tests/test_transform.py::TestTransformerEchos::test_gain_in_invalid", "tests/test_transform.py::TestTransformerEchos::test_gain_in_valid", "tests/test_transform.py::TestTransformerEchos::test_gain_out_invalid", "tests/test_transform.py::TestTransformerEchos::test_gain_out_valid", "tests/test_transform.py::TestTransformerEchos::test_n_echos_invalid", "tests/test_transform.py::TestTransformerEchos::test_n_echos_valid", "tests/test_transform.py::TestTransformerEqualizer::test_default", "tests/test_transform.py::TestTransformerEqualizer::test_frequency_invalid", "tests/test_transform.py::TestTransformerEqualizer::test_gain_db_invalid", "tests/test_transform.py::TestTransformerEqualizer::test_width_q_invalid", "tests/test_transform.py::TestTransformerFade::test_default", "tests/test_transform.py::TestTransformerFade::test_fade_in_invalid", "tests/test_transform.py::TestTransformerFade::test_fade_in_valid", "tests/test_transform.py::TestTransformerFade::test_fade_out_invalid", "tests/test_transform.py::TestTransformerFade::test_fade_out_valid", "tests/test_transform.py::TestTransformerFade::test_fade_shape_invalid", "tests/test_transform.py::TestTransformerFade::test_fade_shape_valid", "tests/test_transform.py::TestTransformerFir::test_default", "tests/test_transform.py::TestTransformerFir::test_invalid_coeffs_nonlist", 
"tests/test_transform.py::TestTransformerFir::test_invalid_coeffs_vals", "tests/test_transform.py::TestTransformerFlanger::test_default", "tests/test_transform.py::TestTransformerFlanger::test_flanger_delay_invalid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_delay_valid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_depth_invalid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_depth_valid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_interp_invalid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_interp_valid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_phase_invalid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_phase_valid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_regen_invalid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_regen_valid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_shape_invalid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_shape_valid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_speed_invalid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_speed_valid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_width_invalid", "tests/test_transform.py::TestTransformerFlanger::test_flanger_width_valid", "tests/test_transform.py::TestTransformerGain::test_balance_invalid", "tests/test_transform.py::TestTransformerGain::test_balance_valid", "tests/test_transform.py::TestTransformerGain::test_default", "tests/test_transform.py::TestTransformerGain::test_gain_db_invalid", "tests/test_transform.py::TestTransformerGain::test_gain_db_valid", "tests/test_transform.py::TestTransformerGain::test_limiter_invalid", "tests/test_transform.py::TestTransformerGain::test_limiter_valid", "tests/test_transform.py::TestTransformerGain::test_normalize_invalid", 
"tests/test_transform.py::TestTransformerGain::test_normalize_valid", "tests/test_transform.py::TestTransformerHighpass::test_default", "tests/test_transform.py::TestTransformerHighpass::test_frequency_invalid", "tests/test_transform.py::TestTransformerHighpass::test_n_poles_invalid", "tests/test_transform.py::TestTransformerHighpass::test_one_pole", "tests/test_transform.py::TestTransformerHighpass::test_width_q_invalid", "tests/test_transform.py::TestTransformerHilbert::test_default", "tests/test_transform.py::TestTransformerHilbert::test_num_taps_invalid", "tests/test_transform.py::TestTransformerHilbert::test_num_taps_invalid_even", "tests/test_transform.py::TestTransformerHilbert::test_num_taps_valid", "tests/test_transform.py::TestTransformerLowpass::test_default", "tests/test_transform.py::TestTransformerLowpass::test_frequency_invalid", "tests/test_transform.py::TestTransformerLowpass::test_n_poles_invalid", "tests/test_transform.py::TestTransformerLowpass::test_one_pole", "tests/test_transform.py::TestTransformerLowpass::test_width_q_invalid", "tests/test_transform.py::TestTransformerLoudness::test_default", "tests/test_transform.py::TestTransformerLoudness::test_gain_db_invalid", "tests/test_transform.py::TestTransformerLoudness::test_gain_db_valid", "tests/test_transform.py::TestTransformerLoudness::test_reference_level_invalid", "tests/test_transform.py::TestTransformerLoudness::test_reference_level_oorange", "tests/test_transform.py::TestTransformerLoudness::test_reference_level_valid", "tests/test_transform.py::TestTransformerMcompand::test_attack_time_invalid_len", "tests/test_transform.py::TestTransformerMcompand::test_attack_time_invalid_neg", "tests/test_transform.py::TestTransformerMcompand::test_attack_time_invalid_nonnum", "tests/test_transform.py::TestTransformerMcompand::test_attack_time_invalid_type", "tests/test_transform.py::TestTransformerMcompand::test_crossover_frequencies_invalid", 
"tests/test_transform.py::TestTransformerMcompand::test_crossover_frequencies_invalid_vals", "tests/test_transform.py::TestTransformerMcompand::test_decay_time_invalid_len", "tests/test_transform.py::TestTransformerMcompand::test_decay_time_invalid_neg", "tests/test_transform.py::TestTransformerMcompand::test_decay_time_invalid_nonnum", "tests/test_transform.py::TestTransformerMcompand::test_decay_time_invalid_type", "tests/test_transform.py::TestTransformerMcompand::test_gain_len_invalid", "tests/test_transform.py::TestTransformerMcompand::test_gain_values_invalid", "tests/test_transform.py::TestTransformerMcompand::test_n_bands_invalid", "tests/test_transform.py::TestTransformerMcompand::test_soft_knee_db_invalid_len", "tests/test_transform.py::TestTransformerMcompand::test_soft_knee_db_invalid_type", "tests/test_transform.py::TestTransformerMcompand::test_soft_knee_invalid", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_elt_empty", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_elt_nonlist", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_elt_nontuples", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_elt_tup_len", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_elt_tup_nonnum", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_tup_dups", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_tup_nonnum2", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_tup_positive", "tests/test_transform.py::TestTransformerMcompand::test_tf_points_wrong_len", "tests/test_transform.py::TestTransformerNoiseprof::test_default", "tests/test_transform.py::TestTransformerNoiseprof::test_noise_prof_invalid_dir", "tests/test_transform.py::TestTransformerNoisered::test_amount_invalid", "tests/test_transform.py::TestTransformerNoisered::test_amount_valid", "tests/test_transform.py::TestTransformerNoisered::test_default", 
"tests/test_transform.py::TestTransformerNoisered::test_noise_prof_valid", "tests/test_transform.py::TestTransformerNorm::test_db_level_invalid", "tests/test_transform.py::TestTransformerNorm::test_db_level_valid", "tests/test_transform.py::TestTransformerNorm::test_default", "tests/test_transform.py::TestTransformerOops::test_default", "tests/test_transform.py::TestTransformerOverdrive::test_colour_invalid", "tests/test_transform.py::TestTransformerOverdrive::test_colour_valid", "tests/test_transform.py::TestTransformerOverdrive::test_default", "tests/test_transform.py::TestTransformerOverdrive::test_gain_db_invalid", "tests/test_transform.py::TestTransformerOverdrive::test_gain_db_valid", "tests/test_transform.py::TestTransformerPad::test_default", "tests/test_transform.py::TestTransformerPad::test_end_duration_invalid", "tests/test_transform.py::TestTransformerPad::test_end_duration_valid", "tests/test_transform.py::TestTransformerPad::test_start_duration_invalid", "tests/test_transform.py::TestTransformerPad::test_start_duration_valid", "tests/test_transform.py::TestTransformerPhaser::test_decay_invalid", "tests/test_transform.py::TestTransformerPhaser::test_decay_valid", "tests/test_transform.py::TestTransformerPhaser::test_default", "tests/test_transform.py::TestTransformerPhaser::test_delay_invalid", "tests/test_transform.py::TestTransformerPhaser::test_delay_valid", "tests/test_transform.py::TestTransformerPhaser::test_gain_in_invalid", "tests/test_transform.py::TestTransformerPhaser::test_gain_in_valid", "tests/test_transform.py::TestTransformerPhaser::test_gain_out_invalid", "tests/test_transform.py::TestTransformerPhaser::test_gain_out_valid", "tests/test_transform.py::TestTransformerPhaser::test_modulation_shape_invalid", "tests/test_transform.py::TestTransformerPhaser::test_modulation_shape_valid", "tests/test_transform.py::TestTransformerPhaser::test_speed_invalid", "tests/test_transform.py::TestTransformerPhaser::test_speed_valid", 
"tests/test_transform.py::TestTransformerPitch::test_default", "tests/test_transform.py::TestTransformerPitch::test_n_semitones_invalid", "tests/test_transform.py::TestTransformerPitch::test_n_semitones_valid", "tests/test_transform.py::TestTransformerPitch::test_n_semitones_warning", "tests/test_transform.py::TestTransformerPitch::test_quick_invalid", "tests/test_transform.py::TestTransformerPitch::test_quick_valid", "tests/test_transform.py::TestTransformerRate::test_default", "tests/test_transform.py::TestTransformerRate::test_quality_invalid", "tests/test_transform.py::TestTransformerRate::test_quality_valid", "tests/test_transform.py::TestTransformerRate::test_samplerate_invalid", "tests/test_transform.py::TestTransformerRate::test_samplerate_valid", "tests/test_transform.py::TestTransformerRemix::test_default", "tests/test_transform.py::TestTransformerRemix::test_num_channels_valid", "tests/test_transform.py::TestTransformerRemix::test_num_output_channels_invalid", "tests/test_transform.py::TestTransformerRemix::test_remix_dict_invalid", "tests/test_transform.py::TestTransformerRemix::test_remix_dict_invalid2", "tests/test_transform.py::TestTransformerRemix::test_remix_dict_invalid3", "tests/test_transform.py::TestTransformerRemix::test_remix_dict_invalid4", "tests/test_transform.py::TestTransformerRemix::test_remix_dictionary_none", "tests/test_transform.py::TestTransformerRemix::test_remix_dictionary_valid", "tests/test_transform.py::TestTransformerRepeat::test_count_invalid", "tests/test_transform.py::TestTransformerRepeat::test_count_invalid_fmt", "tests/test_transform.py::TestTransformerRepeat::test_count_valid", "tests/test_transform.py::TestTransformerRepeat::test_default", "tests/test_transform.py::TestTransformerReverb::test_default", "tests/test_transform.py::TestTransformerReverb::test_high_freq_damping_invalid", "tests/test_transform.py::TestTransformerReverb::test_high_freq_damping_valid", 
"tests/test_transform.py::TestTransformerReverb::test_pre_delay_invalid", "tests/test_transform.py::TestTransformerReverb::test_pre_delay_valid", "tests/test_transform.py::TestTransformerReverb::test_reverberance_invalid", "tests/test_transform.py::TestTransformerReverb::test_reverberance_valid", "tests/test_transform.py::TestTransformerReverb::test_room_scale_invalid", "tests/test_transform.py::TestTransformerReverb::test_room_scale_valid", "tests/test_transform.py::TestTransformerReverb::test_stereo_depth_invalid", "tests/test_transform.py::TestTransformerReverb::test_stereo_depth_valid", "tests/test_transform.py::TestTransformerReverb::test_wet_gain_invalid", "tests/test_transform.py::TestTransformerReverb::test_wet_gain_valid", "tests/test_transform.py::TestTransformerReverb::test_wet_only_invalid", "tests/test_transform.py::TestTransformerReverb::test_wet_only_valid", "tests/test_transform.py::TestTransformerReverse::test_default", "tests/test_transform.py::TestTransformerSilence::test_buffer_around_silence_invalid", "tests/test_transform.py::TestTransformerSilence::test_buffer_around_silence_valid", "tests/test_transform.py::TestTransformerSilence::test_default", "tests/test_transform.py::TestTransformerSilence::test_location_beginning", "tests/test_transform.py::TestTransformerSilence::test_location_end", "tests/test_transform.py::TestTransformerSilence::test_location_invalid", "tests/test_transform.py::TestTransformerSilence::test_min_silence_duration_invalid", "tests/test_transform.py::TestTransformerSilence::test_min_silence_duration_valid", "tests/test_transform.py::TestTransformerSilence::test_silence_threshold_invalid", "tests/test_transform.py::TestTransformerSilence::test_silence_threshold_invalid2", "tests/test_transform.py::TestTransformerSilence::test_silence_threshold_valid", "tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_invalid", "tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_invalid_high", 
"tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_invalid_list", "tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_invalid_list_len", "tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_invalid_number", "tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_invalid_reject", "tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_valid_float", "tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_valid_list", "tests/test_transform.py::TestTransformerSinc::test_cutoff_freq_valid_unordered", "tests/test_transform.py::TestTransformerSinc::test_default", "tests/test_transform.py::TestTransformerSinc::test_filter_type_invalid", "tests/test_transform.py::TestTransformerSinc::test_filter_type_valid_low", "tests/test_transform.py::TestTransformerSinc::test_filter_type_valid_pass", "tests/test_transform.py::TestTransformerSinc::test_filter_type_valid_reject", "tests/test_transform.py::TestTransformerSinc::test_phase_response_invalid", "tests/test_transform.py::TestTransformerSinc::test_phase_response_invalid_large", "tests/test_transform.py::TestTransformerSinc::test_phase_response_invalid_small", "tests/test_transform.py::TestTransformerSinc::test_phase_response_valid_high", "tests/test_transform.py::TestTransformerSinc::test_phase_response_valid_low", "tests/test_transform.py::TestTransformerSinc::test_phase_response_valid_mid", "tests/test_transform.py::TestTransformerSinc::test_stop_band_attenuation_invalid", "tests/test_transform.py::TestTransformerSinc::test_stop_band_attenuation_valid", "tests/test_transform.py::TestTransformerSinc::test_transition_bw_invalid", "tests/test_transform.py::TestTransformerSinc::test_transition_bw_invalid_float", "tests/test_transform.py::TestTransformerSinc::test_transition_bw_invalid_list_elt", "tests/test_transform.py::TestTransformerSinc::test_transition_bw_invalid_low", "tests/test_transform.py::TestTransformerSinc::test_transition_bw_linvalid_list_len", 
"tests/test_transform.py::TestTransformerSinc::test_transition_bw_valid_high", "tests/test_transform.py::TestTransformerSinc::test_transition_bw_valid_low", "tests/test_transform.py::TestTransformerSinc::test_transition_bw_valid_pass_float", "tests/test_transform.py::TestTransformerSinc::test_transition_bw_valid_pass_list", "tests/test_transform.py::TestTransformerSpeed::test_default", "tests/test_transform.py::TestTransformerSpeed::test_factor_invalid", "tests/test_transform.py::TestTransformerSpeed::test_factor_valid", "tests/test_transform.py::TestTransformerSpeed::test_factor_valid_extreme", "tests/test_transform.py::TestTransformerStat::test_default", "tests/test_transform.py::TestTransformerStat::test_multichannel", "tests/test_transform.py::TestTransformerStat::test_rms", "tests/test_transform.py::TestTransformerStat::test_scale", "tests/test_transform.py::TestTransformerStat::test_scale_invalid", "tests/test_transform.py::TestTransformerPowerSpectrum::test_multichannel", "tests/test_transform.py::TestTransformerPowerSpectrum::test_valid", "tests/test_transform.py::TestTransformerStats::test_default", "tests/test_transform.py::TestTransformerStats::test_multichannel", "tests/test_transform.py::TestTransformerSwap::test_default", "tests/test_transform.py::TestTransformerStretch::test_default", "tests/test_transform.py::TestTransformerStretch::test_factor_extreme", "tests/test_transform.py::TestTransformerStretch::test_factor_invalid", "tests/test_transform.py::TestTransformerStretch::test_factor_valid", "tests/test_transform.py::TestTransformerStretch::test_window_invalid", "tests/test_transform.py::TestTransformerStretch::test_window_valid", "tests/test_transform.py::TestTransformerTempo::test_audio_type_invalid", "tests/test_transform.py::TestTransformerTempo::test_audio_type_valid", "tests/test_transform.py::TestTransformerTempo::test_default", "tests/test_transform.py::TestTransformerTempo::test_factor_invalid", 
"tests/test_transform.py::TestTransformerTempo::test_factor_valid", "tests/test_transform.py::TestTransformerTempo::test_factor_warning", "tests/test_transform.py::TestTransformerTempo::test_quick_invalid", "tests/test_transform.py::TestTransformerTempo::test_quick_valid", "tests/test_transform.py::TestTransformerTreble::test_default", "tests/test_transform.py::TestTransformerTreble::test_frequency_invalid", "tests/test_transform.py::TestTransformerTreble::test_gain_db_invalid", "tests/test_transform.py::TestTransformerTreble::test_slope_invalid", "tests/test_transform.py::TestTransformerTremolo::test_default", "tests/test_transform.py::TestTransformerTremolo::test_depth_invalid", "tests/test_transform.py::TestTransformerTremolo::test_speed_invalid", "tests/test_transform.py::TestTransformerTrim::test_default", "tests/test_transform.py::TestTransformerTrim::test_invalid_end_time", "tests/test_transform.py::TestTransformerTrim::test_invalid_start_time", "tests/test_transform.py::TestTransformerTrim::test_invalid_time_pair", "tests/test_transform.py::TestTransformerUpsample::test_default", "tests/test_transform.py::TestTransformerUpsample::test_invalid_factor_decimal", "tests/test_transform.py::TestTransformerUpsample::test_invalid_factor_neg", "tests/test_transform.py::TestTransformerUpsample::test_invalid_factor_nonnum", "tests/test_transform.py::TestTransformerVad::test_default", "tests/test_transform.py::TestTransformerVad::test_end_location", "tests/test_transform.py::TestTransformerVad::test_invalid_activity_threshold", "tests/test_transform.py::TestTransformerVad::test_invalid_initial_pad", "tests/test_transform.py::TestTransformerVad::test_invalid_initial_search_buffer", "tests/test_transform.py::TestTransformerVad::test_invalid_location", "tests/test_transform.py::TestTransformerVad::test_invalid_max_gap", "tests/test_transform.py::TestTransformerVad::test_invalid_min_activity_duration", "tests/test_transform.py::TestTransformerVad::test_invalid_normalize", 
"tests/test_transform.py::TestTransformerVad::test_no_normalize", "tests/test_transform.py::TestTransformerVol::test_default", "tests/test_transform.py::TestTransformerVol::test_gain_type_db", "tests/test_transform.py::TestTransformerVol::test_gain_type_power", "tests/test_transform.py::TestTransformerVol::test_invalid_gain", "tests/test_transform.py::TestTransformerVol::test_invalid_gain_power", "tests/test_transform.py::TestTransformerVol::test_invalid_gain_type", "tests/test_transform.py::TestTransformerVol::test_invalid_limiter_gain", "tests/test_transform.py::TestTransformerVol::test_limiter_gain", "tests/test_transform.py::TestTransformerVol::test_limiter_gain_vol_down", "tests/test_transform.py::TestTransformerVol::test_limiter_gain_vol_down_db", "tests/test_transform.py::TestTransformerVol::test_limiter_gain_vol_up_db" ]
[]
BSD 3-Clause "New" or "Revised" License
2,840
[ "sox/combine.py", "sox/transform.py", "sox/core.py", "sox/version.py", "sox/file_info.py" ]
[ "sox/combine.py", "sox/transform.py", "sox/core.py", "sox/version.py", "sox/file_info.py" ]
python-trio__trio-576
fd0d8770a120a09a506ccd8a3f9969b26a22f38e
2018-07-29 10:47:28
fd0d8770a120a09a506ccd8a3f9969b26a22f38e
codecov[bot]: # [Codecov](https://codecov.io/gh/python-trio/trio/pull/576?src=pr&el=h1) Report > Merging [#576](https://codecov.io/gh/python-trio/trio/pull/576?src=pr&el=desc) into [master](https://codecov.io/gh/python-trio/trio/commit/fd0d8770a120a09a506ccd8a3f9969b26a22f38e?src=pr&el=desc) will **increase** coverage by `<.01%`. > The diff coverage is `100%`. [![Impacted file tree graph](https://codecov.io/gh/python-trio/trio/pull/576/graphs/tree.svg?token=Lhvc0OnSCw&src=pr&height=150&width=650)](https://codecov.io/gh/python-trio/trio/pull/576?src=pr&el=tree) ```diff @@ Coverage Diff @@ ## master #576 +/- ## ========================================== + Coverage 99.27% 99.27% +<.01% ========================================== Files 89 89 Lines 10628 10640 +12 Branches 747 747 ========================================== + Hits 10551 10563 +12 Misses 59 59 Partials 18 18 ``` | [Impacted Files](https://codecov.io/gh/python-trio/trio/pull/576?src=pr&el=tree) | Coverage Δ | | |---|---|---| | [trio/\_sync.py](https://codecov.io/gh/python-trio/trio/pull/576/diff?src=pr&el=tree#diff-dHJpby9fc3luYy5weQ==) | `100% <100%> (ø)` | :arrow_up: | | [trio/tests/test\_sync.py](https://codecov.io/gh/python-trio/trio/pull/576/diff?src=pr&el=tree#diff-dHJpby90ZXN0cy90ZXN0X3N5bmMucHk=) | `100% <100%> (ø)` | :arrow_up: | ------ [Continue to review full report at Codecov](https://codecov.io/gh/python-trio/trio/pull/576?src=pr&el=continue). > **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta) > `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data` > Powered by [Codecov](https://codecov.io/gh/python-trio/trio/pull/576?src=pr&el=footer). Last update [fd0d877...26d4073](https://codecov.io/gh/python-trio/trio/pull/576?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
diff --git a/newsfragments/548.bugfix.rst b/newsfragments/548.bugfix.rst new file mode 100644 index 00000000..90c549af --- /dev/null +++ b/newsfragments/548.bugfix.rst @@ -0,0 +1,1 @@ +Fix a memory leak in :class:`trio.CapacityLimiter`, that could occurr when ``acquire`` or ``acquire_on_behalf_of`` was cancelled. diff --git a/trio/_sync.py b/trio/_sync.py index fe806dd3..a99f75a3 100644 --- a/trio/_sync.py +++ b/trio/_sync.py @@ -288,7 +288,11 @@ class CapacityLimiter: except _core.WouldBlock: task = _core.current_task() self._pending_borrowers[task] = borrower - await self._lot.park() + try: + await self._lot.park() + except _core.Cancelled: + self._pending_borrowers.pop(task) + raise except: await _core.cancel_shielded_checkpoint() raise
Check CapacityLimiter's handling of _pending_borrowers I was just reading the code for an unrelated reason, and realized that this code looks very suspicious: https://github.com/python-trio/trio/blob/65729b121c55ff0aa1f5d43bd9cdcd6a81e54f0c/trio/_sync.py#L286-L296 Specifically, if the call to `await self._lot.park()` gets cancelled, it looks like we don't clear `self._pending_borrowers[task]`, which could be a memory leak (since it pins task objects in memory).
python-trio/trio
diff --git a/trio/tests/test_sync.py b/trio/tests/test_sync.py index bded1b05..64aa3235 100644 --- a/trio/tests/test_sync.py +++ b/trio/tests/test_sync.py @@ -147,6 +147,21 @@ async def test_CapacityLimiter_change_total_tokens(): assert c.statistics().tasks_waiting == 0 +# regression test for issue #548 +async def test_CapacityLimiter_memleak_548(): + limiter = CapacityLimiter(total_tokens=1) + await limiter.acquire() + + async with _core.open_nursery() as n: + n.start_soon(limiter.acquire) + await wait_all_tasks_blocked() # give it a chance to run the task + n.cancel_scope.cancel() + + # if this is 1, the acquire call (despite being killed) is still there in the task, and will + # leak memory all the while the limiter is active + assert len(limiter._pending_borrowers) == 0 + + async def test_Semaphore(): with pytest.raises(TypeError): Semaphore(1.0)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "test-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asttokens==3.0.0 async-generator==1.10 attrs==25.3.0 cffi==1.17.1 coverage==7.8.0 cryptography==44.0.2 decorator==5.2.1 exceptiongroup==1.2.2 executing==2.2.0 idna==3.10 iniconfig==2.1.0 ipython==8.18.1 jedi==0.19.2 matplotlib-inline==0.1.7 outcome==1.3.0.post0 packaging==24.2 parso==0.8.4 pexpect==4.9.0 pluggy==1.5.0 prompt_toolkit==3.0.50 ptyprocess==0.7.0 pure_eval==0.2.3 pycparser==2.22 Pygments==2.19.1 pyOpenSSL==25.0.0 pytest==8.3.5 pytest-cov==6.0.0 pytest-faulthandler==2.0.1 sortedcontainers==2.4.0 stack-data==0.6.3 tomli==2.2.1 traitlets==5.14.3 -e git+https://github.com/python-trio/trio.git@fd0d8770a120a09a506ccd8a3f9969b26a22f38e#egg=trio trustme==1.2.1 typing_extensions==4.13.0 wcwidth==0.2.13
name: trio channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asttokens==3.0.0 - async-generator==1.10 - attrs==25.3.0 - cffi==1.17.1 - coverage==7.8.0 - cryptography==44.0.2 - decorator==5.2.1 - exceptiongroup==1.2.2 - executing==2.2.0 - idna==3.10 - iniconfig==2.1.0 - ipython==8.18.1 - jedi==0.19.2 - matplotlib-inline==0.1.7 - outcome==1.3.0.post0 - packaging==24.2 - parso==0.8.4 - pexpect==4.9.0 - pluggy==1.5.0 - prompt-toolkit==3.0.50 - ptyprocess==0.7.0 - pure-eval==0.2.3 - pycparser==2.22 - pygments==2.19.1 - pyopenssl==25.0.0 - pytest==8.3.5 - pytest-cov==6.0.0 - pytest-faulthandler==2.0.1 - sortedcontainers==2.4.0 - stack-data==0.6.3 - tomli==2.2.1 - traitlets==5.14.3 - trio==0.5.0+dev - trustme==1.2.1 - typing-extensions==4.13.0 - wcwidth==0.2.13 prefix: /opt/conda/envs/trio
[ "trio/tests/test_sync.py::test_CapacityLimiter_memleak_548" ]
[]
[ "trio/tests/test_sync.py::test_Event", "trio/tests/test_sync.py::test_CapacityLimiter", "trio/tests/test_sync.py::test_CapacityLimiter_change_total_tokens", "trio/tests/test_sync.py::test_Semaphore", "trio/tests/test_sync.py::test_Semaphore_bounded", "trio/tests/test_sync.py::test_Lock_and_StrictFIFOLock[Lock]", "trio/tests/test_sync.py::test_Lock_and_StrictFIFOLock[StrictFIFOLock]", "trio/tests/test_sync.py::test_Condition", "trio/tests/test_sync.py::test_Queue", "trio/tests/test_sync.py::test_553", "trio/tests/test_sync.py::test_Queue_iter", "trio/tests/test_sync.py::test_Queue_statistics", "trio/tests/test_sync.py::test_Queue_fairness", "trio/tests/test_sync.py::test_Queue_unbuffered", "trio/tests/test_sync.py::test_generic_lock_exclusion[CapacityLimiter(1)]", "trio/tests/test_sync.py::test_generic_lock_exclusion[Semaphore(1)]", "trio/tests/test_sync.py::test_generic_lock_exclusion[Lock]", "trio/tests/test_sync.py::test_generic_lock_exclusion[StrictFIFOLock]", "trio/tests/test_sync.py::test_generic_lock_exclusion[QueueLock1(10)]", "trio/tests/test_sync.py::test_generic_lock_exclusion[QueueLock1(1)]", "trio/tests/test_sync.py::test_generic_lock_exclusion[QueueLock2]", "trio/tests/test_sync.py::test_generic_lock_exclusion[QueueLock3]", "trio/tests/test_sync.py::test_generic_lock_fifo_fairness[CapacityLimiter(1)]", "trio/tests/test_sync.py::test_generic_lock_fifo_fairness[Semaphore(1)]", "trio/tests/test_sync.py::test_generic_lock_fifo_fairness[Lock]", "trio/tests/test_sync.py::test_generic_lock_fifo_fairness[StrictFIFOLock]", "trio/tests/test_sync.py::test_generic_lock_fifo_fairness[QueueLock1(10)]", "trio/tests/test_sync.py::test_generic_lock_fifo_fairness[QueueLock1(1)]", "trio/tests/test_sync.py::test_generic_lock_fifo_fairness[QueueLock2]", "trio/tests/test_sync.py::test_generic_lock_fifo_fairness[QueueLock3]", "trio/tests/test_sync.py::test_generic_lock_acquire_nowait_blocks_acquire[CapacityLimiter(1)]", 
"trio/tests/test_sync.py::test_generic_lock_acquire_nowait_blocks_acquire[Semaphore(1)]", "trio/tests/test_sync.py::test_generic_lock_acquire_nowait_blocks_acquire[Lock]", "trio/tests/test_sync.py::test_generic_lock_acquire_nowait_blocks_acquire[StrictFIFOLock]", "trio/tests/test_sync.py::test_generic_lock_acquire_nowait_blocks_acquire[QueueLock1(10)]", "trio/tests/test_sync.py::test_generic_lock_acquire_nowait_blocks_acquire[QueueLock1(1)]", "trio/tests/test_sync.py::test_generic_lock_acquire_nowait_blocks_acquire[QueueLock2]", "trio/tests/test_sync.py::test_generic_lock_acquire_nowait_blocks_acquire[QueueLock3]" ]
[]
MIT/Apache-2.0 Dual License
2,842
[ "trio/_sync.py", "newsfragments/548.bugfix.rst" ]
[ "trio/_sync.py", "newsfragments/548.bugfix.rst" ]
lmfit__lmfit-py-487
6c87262fcfd3c361b197c6769852f76366113246
2018-07-29 12:46:35
449c6ed9f2d70c853507933c0e5f39c2ff75635e
newville: @schachmett thanks very much for the thorough explanations! I can believe these are all important and necessary changes, but I want to try this out myself too. For example, I'm not sure why this is generating errors in `_getval`, but think that maybe we should figure that out. Turning your very nice minimal example into a unit test should not be hard. For example, it could be added to `tests/test_parameter.py` using `assert` on the size of the asteval error array. My inclination is to merge this soon. If you don't have time or energy to add tests in the code, we can do that soon afterwards. Anyone else have concerns or objections to that? schachmett: @newville thank you. The errors in `_getval` are generated because of the `show_errors=False` argument to `_expr_eval`. When this option is set to `True`, the errors are printed (with some weird indentation) instead and `check_ast_errors` prints them a second time. I think I can find the time for adding tests this week, probably today or tomorrow. So you could leave the PR open until the tests are committed? schachmett: @newville Well, `show_errors=True` and the surrounding try, except could just be omitted at the cost of some duplicate printed output. As for your proposed changes, they would effectively be the same as my first commit. The problems there are: * It might be that the error array does not exist yet at this point in the code, raising an AttributeError on some occasions (That is why Travis CI failed on my first commit). * Inside `_expr_eval.parse()`, an exception is stored in the error list and then raised. If this error is catched by the surrounding application, the list is not cleaned afterwards and other parts of the code (meaning `_getval()`) can raise the same exception again via `check_ast_errors()`. This is why I did not let `parse()` raise an error of its own and why I get rid of old exceptions in the `finally` clause. 
-> It is ensured that the error list is cleaned **after** raising and it is only raised once (which would be the whole point of `check_ast_errors()`) * For `_getval`, essentially the same applies but here, other exceptions can be raised. `parse()` only checks for SyntaxErrors while calling `_expr_eval()` also gets nameerrors (see minimal example). That is why I wanted to call this once during `Parameter.set()`, so invalid expressions get detected right away. I don't know if this would break code where you set an expression for some parameter relating it to another not yet initialized parameter, or if that would already raise exceptions and is discouraged anyway. My code passing the nosetests indicates the latter. This looked real simple to me, too, in the beginning and maybe I am overthinking it now. But I don't see any real harm in writing try, except clauses in places where errors are checked, anyway. newville: On Wed, Aug 1, 2018 at 5:13 PM Simon Fischer <[email protected]> wrote: > @newville <https://github.com/newville> Well, show_errors=True and the > surrounding try, except could just be omitted at the cost of some duplicate > printed output. > > As for your proposed changes, they would effectively be the same as my > first commit. The problems there are: > > - It might be that the error array does not exist yet at this point in > the code, raising an AttributeError on some occasions (That is why Travis > CI failed on my first commit). > > Except that it will exist if it explicitly set to `[]`. > > - Inside _expr_eval.parse(), an exception is stored in the error list > and then raised. If this error is catched by the surrounding application, > the list is not cleaned afterwards and other parts of the code (meaning > _getval()) can raise the same exception again via check_ast_errors(). > This is why I did not let parse() raise an error of its own and why I > get rid of old exceptions in the finally clause. 
-> It is ensured that > the error list is cleaned *after* raising and it is only raised once > (which would be the whole point of check_ast_errors()) > > Yes, if there is an error in the expression that will be retained until explicitly cleared. And, if you do reset the expression, it *will* be cleared. If you do not reset the expression, there will be an error in the expression, and it will not evaluate correctly until fixed. So the existence of a non-empty `_expr_eval.error` means there is a serious problem that will prevent parameter evaluation. > > - For _getval, essentially the same applies but here, other exceptions > can be raised. parse() only checks for SyntaxErrors while calling > _expr_eval() also gets nameerrors (see minimal example). > > For sure, `parse()` is only sensitive to syntax errors not to other runtime exceptions. That is unavoidable. But, if you "clear errors" before resetting the expression, that should work. As you have it, with clearing the error in `_getval()` will happen *much* more often, and may ignore runtime errors, say during the fitting process. I'm not sure that is a good idea. > - That is why I wanted to call this once during Parameter.set(), so > invalid expressions get detected right away. > > But doing 0) clear errors 1) parse 2) check for errors will reliably (and clearly) check for parse errors right where they happen. > - I don't know if this would break code where you set an expression > for some parameter relating it to another not yet initialized parameter, or > if that would already raise exceptions and is discouraged anyway. My code > passing the nosetests indicates the latter. > > This looked real simple to me, too, in the beginning and maybe I am > overthinking it now. But I don't see any real harm in writing try, except > clauses in places where errors are checked, anyway. 
> A small change that addresses the actual problem where it actually happens (as opposed to trying to fix other potential problems) is almost always preferable. Please don't remove `check_ast_errors()`. schachmett: Okay, it seems I really did overthink it. I will revert to the simple solution tomorrow as it is getting late here. Do you however think that leaving _getval inside the set method to directly get the NameErrors when setting expr is a good idea? reneeotten: @schachmett @newville sorry for being late to the party and it looks like the issue is almost fix already! Two comments: 1) I am not sure that ```_getval``` should be called inside the function to set an expression. It seems that the purpose of this is to get a more direct/clear error message when using a variable in an expression that wasn't defined yet, correct? I would prefer to put ```check_ast_errors()``` back as @newville said earlier and explore if there is another way of doing this - I haven't checked carefully... so perhaps I'm mistaken! 2) once it's settled on what to do please squash the commits (probably into to two, one fixing the issue and another for adding the test) and use slightly more useful commit messages. schachmett: hi @reneeotten, yes you are right about the more direct error message. Essentially I would feel like ```python with self.assertRaises(NameError): p1['y'].set(expr='z*5.0') ``` should be in the tests and therefore either `_getval` or or `self._expr_eval(self._expr_ast)` directly would need to be called somewhere in `__set_expression` or in `set`. The reason for this test is that it makes evaluation of user input in an application easier. And oops, I just forgot about putting back `check_ast_errors` into `__set_expression` because the tests passed anyway with `_getval` being in there. If you @reneeotten and @newville could give me your opinion on the first paragraph I'd just implement it or not and then squash the commits like you said. 
newville: @schachmett I believe that `_getval()` should not be needed in `_set_expression()`. That is, I believe the value will be accessed by `Parameters.set()` immediately afterwards and the NameError raised anyway.
diff --git a/lmfit/parameter.py b/lmfit/parameter.py index 9a5c7d1a..235a919b 100644 --- a/lmfit/parameter.py +++ b/lmfit/parameter.py @@ -826,6 +826,8 @@ class Parameter(object): if val is None: self._expr_ast = None if val is not None and self._expr_eval is not None: + self._expr_eval.error = [] + self._expr_eval.error_msg = None self._expr_ast = self._expr_eval.parse(val) check_ast_errors(self._expr_eval) self._expr_deps = get_ast_names(self._expr_ast)
asteval errors are not flushed after raising #### Description With parameter.set(expr="foo"), if the expression raises an exception in asteval, this error is stored in parameter._expr_eval.errors. Now, the function https://github.com/lmfit/lmfit-py/blob/6c87262fcfd3c361b197c6769852f76366113246/lmfit/parameter.py#L18-L21 gets called from parameter.__set_expression(val) and checks if the length of this errors variable is longer than 0 and if it is, the exception is raised. However, if you try to set the expression for this parameter again (to a VALID expression), the Parameter._expr_eval.errors variable will still contain the first exception and so, check_ast_errors() will always raise an Exception. This should not be the expected behaviour, for example if lmfit is used from a GUI and the user tries to enter a valid expression after first entering an invalid one (where the exception was caught). I will submit a PR where check_ast_errors() flushes the parameter._expr_eval.errors before trying to set a new expression. ###### Version information Python: 3.6.5 (default, Apr 1 2018, 05:46:30) [GCC 7.3.0] lmfit: 0.9.11, scipy: 1.1.0, numpy: 1.15.0, asteval: 0.9.12, uncertainties: 3.0.2, six: 1.11.0 ###### Minimal example ```python import lmfit def set_constraints(paramname, expr): # params[paramname]._expr_eval.error.clear() # this would be the dirty fix try: params[paramname].set(expr=expr) print("expr was valid") except SyntaxError: params[paramname].set(expr="") print("expr was not valid") model = lmfit.models.PseudoVoigtModel() params = model.make_params() set_constraints("amplitude", expr="sigma * 2") set_constraints("amplitude", expr="fail *") set_constraints("amplitude", expr="sigma * 2") ``` produces: ```python expr was valid expr was not valid expr was not valid ``` If you uncomment the "dirty fix", it produces ```python expr was valid expr was not valid expr was valid ```
lmfit/lmfit-py
diff --git a/tests/test_parameters.py b/tests/test_parameters.py index 73604e47..7344241d 100644 --- a/tests/test_parameters.py +++ b/tests/test_parameters.py @@ -212,6 +212,23 @@ class TestParameters(unittest.TestCase): pars.add('c4', expr='csum-c1-c2-c3', min=0, max=1) assert_(isclose(pars['c4'].value, 0.2)) + def test_invalid_expr_exceptions(self): + "test if an exception is raised for invalid expressions (GH486)""" + p1 = Parameters() + p1.add('t', 2.0, min=0.0, max=5.0) + p1.add('x', 10.0) + with self.assertRaises(SyntaxError): + p1.add('y', expr='x*t + sqrt(t)/') + assert(len(p1['y']._expr_eval.error) > 0) + p1.add('y', expr='x*t + sqrt(t)/3.0') + p1['y'].set(expr='x*3.0 + t**2') + assert('x*3' in p1['y'].expr) + assert(len(p1['y']._expr_eval.error) == 0) + with self.assertRaises(SyntaxError): + p1['y'].set(expr='t+') + assert(len(p1['y']._expr_eval.error) > 0) + assert_almost_equal(p1['y'].value, 34.0) + if __name__ == '__main__': unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asteval==0.9.26 attrs==22.2.0 certifi==2021.5.30 future==1.0.0 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/lmfit/lmfit-py.git@6c87262fcfd3c361b197c6769852f76366113246#egg=lmfit nose==1.3.7 numpy==1.19.5 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 scipy==1.5.4 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 uncertainties==3.1.7 zipp==3.6.0
name: lmfit-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - asteval==0.9.26 - attrs==22.2.0 - future==1.0.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - numpy==1.19.5 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - scipy==1.5.4 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - uncertainties==3.1.7 - zipp==3.6.0 prefix: /opt/conda/envs/lmfit-py
[ "tests/test_parameters.py::TestParameters::test_invalid_expr_exceptions" ]
[]
[ "tests/test_parameters.py::TestParameters::test_add_many_params", "tests/test_parameters.py::TestParameters::test_copy", "tests/test_parameters.py::TestParameters::test_copy_function", "tests/test_parameters.py::TestParameters::test_deepcopy", "tests/test_parameters.py::TestParameters::test_dumps_loads_parameters", "tests/test_parameters.py::TestParameters::test_expr_and_constraints_GH265", "tests/test_parameters.py::TestParameters::test_expr_was_evaluated", "tests/test_parameters.py::TestParameters::test_expr_with_bounds", "tests/test_parameters.py::TestParameters::test_isclose", "tests/test_parameters.py::TestParameters::test_pickle_parameter", "tests/test_parameters.py::TestParameters::test_pickle_parameters", "tests/test_parameters.py::TestParameters::test_set_symtable" ]
[]
BSD-3
2,843
[ "lmfit/parameter.py" ]
[ "lmfit/parameter.py" ]
smarr__ReBench-103
4b105d255840335febcfcf6fd22c73c65ae1bff3
2018-07-29 18:53:14
4b105d255840335febcfcf6fd22c73c65ae1bff3
diff --git a/.travis.yml b/.travis.yml index 7d57c83..3d4f9fc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,13 +3,6 @@ language: python matrix: include: - python: "2.7" - virtualenv: - system_site_packages: true - addons: - apt: - packages: - - python-scipy - - python: "3.6" # PyPy versions - python: pypy diff --git a/rebench/executor.py b/rebench/executor.py index 01c99c4..4d7499e 100644 --- a/rebench/executor.py +++ b/rebench/executor.py @@ -35,7 +35,6 @@ from humanfriendly.compat import coerce_string from . import subprocess_with_timeout as subprocess_timeout from .interop.adapter import ExecutionDeliveredNoResults -from .statistics import StatisticProperties, mean class FailedBuilding(Exception): @@ -83,14 +82,10 @@ class RunScheduler(object): if not self._ui.spinner_initialized(): return - totals = run.get_total_values() if completed_task: self._runs_completed += 1 - if totals: - art_mean = mean(run.get_total_values()) - else: - art_mean = 0 + art_mean = run.get_mean_of_totals() hour, minute, sec = self._estimate_time_left() @@ -419,15 +414,15 @@ class Executor(object): terminate = self._generate_data_point(cmdline, gauge_adapter, run_id, termination_check) - stats = StatisticProperties(run_id.get_total_values()) + mean_of_totals = run_id.get_mean_of_totals() if terminate: - run_id.report_run_completed(stats, cmdline) + run_id.report_run_completed(cmdline) if (not run_id.is_failed() and run_id.min_iteration_time - and stats.mean < run_id.min_iteration_time): + and mean_of_totals < run_id.min_iteration_time): self._ui.warning( ("{ind}Warning: Low mean run time.\n" + "{ind}{ind}The mean (%.1f) is lower than min_iteration_time (%d)\n") - % (stats.mean, run_id.min_iteration_time), run_id, cmdline) + % (mean_of_totals, run_id.min_iteration_time), run_id, cmdline) return terminate diff --git a/rebench/model/data_point.py b/rebench/model/data_point.py index f04ed2f..58775ff 100644 --- a/rebench/model/data_point.py +++ b/rebench/model/data_point.py @@ -27,6 +27,10 
@@ class DataPoint(object): self._total = None self._invocation = -1 + @property + def run_id(self): + return self._run_id + @property def invocation(self): return self._invocation diff --git a/rebench/model/run_id.py b/rebench/model/run_id.py index 0d0ca95..846b4c3 100644 --- a/rebench/model/run_id.py +++ b/rebench/model/run_id.py @@ -21,6 +21,7 @@ import re from .benchmark import Benchmark from .termination_check import TerminationCheck +from ..statistics import StatisticProperties from ..ui import UIError @@ -34,7 +35,8 @@ class RunId(object): self._reporters = set() self._persistence = set() - self._data_points = [] + self._statistics = StatisticProperties() + self._total_unit = None self._termination_check = None self._cmdline = None @@ -136,9 +138,9 @@ class RunId(object): for reporter in self._reporters: reporter.run_failed(self, cmdline, return_code, output) - def report_run_completed(self, statistics, cmdline): + def report_run_completed(self, cmdline): for reporter in self._reporters: - reporter.run_completed(self, statistics, cmdline) + reporter.run_completed(self, self._statistics, cmdline) def report_job_completed(self, run_ids): for reporter in self._reporters: @@ -152,6 +154,9 @@ class RunId(object): for reporter in self._reporters: reporter.start_run(self) + def is_persisted_by(self, persistence): + return persistence in self._persistence + def add_persistence(self, persistence): self._persistence.add(persistence) @@ -159,35 +164,34 @@ class RunId(object): for persistence in self._persistence: persistence.close() - def loaded_data_point(self, data_point): + def _new_data_point(self, data_point): self._max_invocation = max(self._max_invocation, data_point.invocation) - self._data_points.append(data_point) + if self._total_unit is None: + self._total_unit = data_point.get_total_unit() + + def loaded_data_point(self, data_point): + self._new_data_point(data_point) + self._statistics.add_sample(data_point.get_total_value()) def add_data_point(self, 
data_point, warmup): - self._max_invocation = max(self._max_invocation, data_point.invocation) + self._new_data_point(data_point) if not warmup: - self._data_points.append(data_point) + self._statistics.add_sample(data_point.get_total_value()) for persistence in self._persistence: persistence.persist_data_point(data_point) def get_number_of_data_points(self): - return len(self._data_points) + return self._statistics.num_samples - def get_data_points(self): - return self._data_points + def get_mean_of_totals(self): + return self._statistics.mean - def discard_data_points(self): - self._data_points = [] - self._max_invocation = 0 - - def get_total_values(self): - return [dp.get_total_value() for dp in self._data_points] + def get_statistics(self): + return self._statistics def get_total_unit(self): - if not self._data_points: - return None - return self._data_points[0].get_total_unit() + return self._total_unit def get_termination_check(self, ui): if self._termination_check is None: @@ -202,7 +206,7 @@ class RunId(object): def run_failed(self): return (self._termination_check.fails_consecutively() or self._termination_check.has_too_many_failures( - len(self._data_points))) + self.get_number_of_data_points())) def __hash__(self): return hash(self.cmdline()) @@ -236,18 +240,22 @@ class RunId(object): def cmdline(self): if self._cmdline: return self._cmdline + return self._construct_cmdline() + def _construct_cmdline(self): cmdline = "" if self._benchmark.suite.executor.path: - cmdline = "%s/" % (self._benchmark.suite.executor.path, ) + cmdline = self._benchmark.suite.executor.path + "/" + + cmdline += self._benchmark.suite.executor.executable - cmdline += "%s %s" % (self._benchmark.suite.executor.executable, - self._benchmark.suite.executor.args or '') + if self._benchmark.suite.executor.args: + cmdline += " " + str(self._benchmark.suite.executor.args) - cmdline += self._benchmark.suite.command + cmdline += " " + self._benchmark.suite.command - if 
self._benchmark.extra_args is not None: - cmdline += " %s" % self._benchmark.extra_args + if self._benchmark.extra_args: + cmdline += " " + str(self._benchmark.extra_args) cmdline = self._expand_vars(cmdline) diff --git a/rebench/persistence.py b/rebench/persistence.py index f4cce13..16af47c 100644 --- a/rebench/persistence.py +++ b/rebench/persistence.py @@ -18,9 +18,11 @@ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import os +import shutil import subprocess import sys from datetime import datetime +from tempfile import NamedTemporaryFile from threading import Lock from .model.data_point import DataPoint @@ -36,9 +38,9 @@ class DataStore(object): self._bench_cfgs = {} self._ui = ui - def load_data(self): + def load_data(self, runs, discard_run_data): for persistence in list(self._files.values()): - persistence._load_data() + persistence.load_data(runs, discard_run_data) def get(self, filename, discard_old_data): if filename not in self._files: @@ -81,41 +83,6 @@ class DataStore(object): self._bench_cfgs[key] = cfg return cfg - @classmethod - def get_by_file(cls, runs): - by_file = {} - for run in runs: - points = run.get_data_points() - run.discard_data_points() - for point in points: - measurements = point.get_measurements() - for measure in measurements: - if measure.filename in by_file: - by_file[measure.filename].append(measure) - else: - by_file[measure.filename] = [measure] - return by_file - - @classmethod - def discard_data_of_runs(cls, runs, ui): - by_file = cls.get_by_file(runs) - for filename, measures in by_file.items(): - try: - with open(filename, 'r') as data_file: - lines = data_file.readlines() - except IOError: - ui.debug_error_info( - "Tried to discard old data, but file does not seem to exist: %s\n" % filename) - continue - - for measure in measures: - lines[measure.line_number] = None - - lines = filter(None, lines) - - with open(filename, 'w') as data_file: - data_file.writelines(lines) - 
class _DataPointPersistence(object): @@ -141,18 +108,30 @@ class _DataPointPersistence(object): with open(filename, 'w'): pass - def _load_data(self): + def load_data(self, runs, discard_run_data): """ Loads the data from the configured data file """ + if discard_run_data: + current_runs = {run for run in runs if run.is_persisted_by(self)} + else: + current_runs = None + try: - with open(self._data_filename, 'r') as data_file: - self._process_lines(data_file) + if current_runs: + with NamedTemporaryFile('w', delete=False) as target: + with open(self._data_filename, 'r') as data_file: + self._process_lines(data_file, current_runs, target) + os.unlink(self._data_filename) + shutil.move(target.name, self._data_filename) + else: + with open(self._data_filename, 'r') as data_file: + self._process_lines(data_file, current_runs, None) except IOError: self._ui.debug_error_info("No data loaded, since %s does not exist.\n" % self._data_filename) - def _process_lines(self, data_file): + def _process_lines(self, data_file, runs, filtered_data_file): """ The most important assumptions we make here is that the total measurement is always the last one serialized for a data point. 
@@ -165,6 +144,8 @@ class _DataPointPersistence(object): for line in data_file: if line.startswith('#'): # skip comments, and shebang lines line_number += 1 + if filtered_data_file: + filtered_data_file.write(line) continue try: @@ -173,6 +154,13 @@ class _DataPointPersistence(object): line_number, self._data_filename) run_id = measurement.run_id + if filtered_data_file and runs and run_id in runs: + continue + + # these are all the measurements that are not filtered out + if filtered_data_file: + filtered_data_file.write(line) + if previous_run_id is not run_id: data_point = DataPoint(run_id) previous_run_id = run_id diff --git a/rebench/rebench.py b/rebench/rebench.py index d561f84..8f54724 100755 --- a/rebench/rebench.py +++ b/rebench/rebench.py @@ -203,22 +203,16 @@ Argument: except ConfigurationError as exc: raise UIError(exc.message + "\n", exc) - data_store.load_data() - return self.execute_experiment() + runs = self._config.get_runs() + data_store.load_data(runs, self._config.options.do_rerun) + return self.execute_experiment(runs) - def execute_experiment(self): + def execute_experiment(self, runs): self._ui.verbose_output_info("Execute experiment: " + self._config.experiment_name + "\n") - # first load old data if available - if self._config.options.clean: - pass - scheduler_class = {'batch': BatchScheduler, 'round-robin': RoundRobinScheduler, 'random': RandomScheduler}.get(self._config.options.scheduler) - runs = self._config.get_runs() - if self._config.options.do_rerun: - DataStore.discard_data_of_runs(runs, self._ui) executor = Executor(runs, self._config.use_nice, self._config.do_builds, self._ui, diff --git a/rebench/reporter.py b/rebench/reporter.py index 6674a3a..4b8b197 100644 --- a/rebench/reporter.py +++ b/rebench/reporter.py @@ -36,8 +36,6 @@ except ImportError: from urllib import urlencode # pylint: disable=ungrouped-imports from urllib2 import urlopen -from .statistics import StatisticProperties - class Reporter(object): @@ -80,13 +78,14 @@ 
class TextReporter(Reporter): rows = [] for run_id in run_ids: - stats = StatisticProperties(run_id.get_total_values()) + mean = run_id.get_mean_of_totals() + num_samples = run_id.get_number_of_data_points() out = run_id.as_str_list() - out.append(stats.num_samples) - if stats.num_samples == 0: + out.append(num_samples) + if num_samples == 0: out.append("Failed") else: - out.append(int(round(stats.mean, 0))) + out.append(int(round(mean, 0))) rows.append(out) return sorted(rows, key=itemgetter(2, 1, 3, 4, 5, 6, 7)) @@ -248,8 +247,7 @@ class CodespeedReporter(Reporter): + "{ind}{ind}" + msg + "\n", run_id) def _prepare_result(self, run_id): - stats = StatisticProperties(run_id.get_total_values()) - return self._format_for_codespeed(run_id, stats) + return self._format_for_codespeed(run_id, run_id.get_statistics()) def report_job_completed(self, run_ids): if self._incremental_report: diff --git a/rebench/statistics.py b/rebench/statistics.py index bcccbc6..945243a 100644 --- a/rebench/statistics.py +++ b/rebench/statistics.py @@ -17,81 +17,64 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
-import copy import math -import operator -from functools import reduce # pylint: disable=redefined-builtin - - -def mean(values): - return sum(values) / float(len(values)) - - -def median(values): - values_ = sorted(copy.deepcopy(values)) - index = int(len(values_) / 2) - if len(values_) % 2 == 0: - return float(values_[index] + values_[index - 1]) / 2 - else: - return values_[index] - - -def geo_mean(values): - product = reduce(operator.mul, values, 1) - return product ** (1.0 / len(values)) - - -def variance(values): - avg = mean(values) - var = [(x - avg) ** 2 for x in values] - return mean(var) - - -def stddev(values): - return math.sqrt(variance(values)) class StatisticProperties(object): """ - The statistics class calculates the statistic - properties of a given set of data samples, i.e., the chosen values - from a set of data points + The class maintains running statistics for the added data points. + Data points can be added one by one, or as lists of values. """ - def __init__(self, data_samples): - self._data_samples = data_samples - - self.mean = 0 - self.geom_mean = 0 - self.median = 0 - self.std_dev = 0 + def __init__(self): self.num_samples = 0 + + self.mean = 0.0 + self.geom_mean = 0.0 + self.std_dev = 0.0 self.min = 0 self.max = 0 - if self._data_samples: - self.num_samples = len(self._data_samples) - self._calc_basic_statistics() + # used to calculate std_dev + # Variance (sigma^2) * num_samples + self._variance_times_num_samples = 0 + # used to calculate geomean + self._product_of_samples = 1.0 + + def add(self, samples): + for sample in samples: + self.add_sample(sample) + + def add_sample(self, sample): + if self.num_samples == 0: + self.mean = float(sample) + self.geom_mean = float(sample) + self._product_of_samples = float(sample) + + self.min = sample + self.max = sample + self.num_samples = 1 else: - self.num_samples = 0 + self.num_samples += 1 + prev_mean = self.mean + self.mean = prev_mean + ((sample - prev_mean) / self.num_samples) + + 
self._product_of_samples = self._product_of_samples * float(sample) + self.geom_mean = self._product_of_samples ** (1/float(self.num_samples)) + + self._variance_times_num_samples = (self._variance_times_num_samples + + ((sample - prev_mean) * (sample - self.mean))) + self.std_dev = math.sqrt(self._variance_times_num_samples / self.num_samples) - def _calc_basic_statistics(self): - """This function determines the mean and the standard deviation - of the data sample. - Furthermore, several other simple properties are determined. - """ - self.mean = mean(self._data_samples) - self.geom_mean = geo_mean(self._data_samples) - self.median = median(self._data_samples) - self.std_dev = stddev(self._data_samples) + if self.min > sample: + self.min = sample - self.min = min(self._data_samples) - self.max = max(self._data_samples) + if self.max < sample: + self.max = sample def as_tuple(self): return (self.mean, self.geom_mean, - self.median, self.std_dev, self.num_samples, self.min, @@ -99,5 +82,5 @@ class StatisticProperties(object): @classmethod def tuple_mapping(cls): - return ('arithmetic mean', 'geometric mean', 'median', 'stdDev', + return ('arithmetic mean', 'geometric mean', 'stdDev', '#samples', 'min', 'max') diff --git a/setup.py b/setup.py index 725b243..48f47ce 100644 --- a/setup.py +++ b/setup.py @@ -39,7 +39,6 @@ setup(name='ReBench', entry_points = { 'console_scripts' : ['rebench = rebench.rebench:main_func'] }, - tests_require = ['scipy>=0.8.0'], test_suite = 'rebench.tests', license = 'MIT' )
Memory usage for long running benchmarks with many data points Currently, we can run out of memory, because we keep all data points and measurements available. In addition to storing them in memory. https://math.stackexchange.com/questions/106700/incremental-averageing
smarr/ReBench
diff --git a/rebench/tests/executor_test.py b/rebench/tests/executor_test.py index 297b5a4..01e55ba 100644 --- a/rebench/tests/executor_test.py +++ b/rebench/tests/executor_test.py @@ -21,6 +21,7 @@ import unittest import subprocess import os +from .persistence import TestPersistence from .rebench_test_case import ReBenchTestCase from ..rebench import ReBench from ..executor import Executor @@ -54,19 +55,6 @@ class ExecutorTest(ReBenchTestCase): finally: subprocess.Popen = old_popen -# TODO: should test more details -# (mean, sdev, (interval, interval_percentage), -# (interval_t, interval_percentage_t)) = ex.result['test-executor']['test-bench'] -# -# self.assertEqual(31, len(ex.benchmark_data['test-executor']['test-bench'])) -# self.assertAlmostEqual(45870.4193548, mean) -# self.assertAlmostEqual(2.93778711485, sdev) -# -# (i_low, i_high) = interval -# self.assertAlmostEqual(45869.385195243565, i_low) -# self.assertAlmostEqual(45871.453514433859, i_high) -# self.assertAlmostEqual(0.00450904792104, interval_percentage) - def test_broken_command_format_with_ValueError(self): with self.assertRaises(UIError) as err: options = ReBench().shell_options().parse_args(['dummy']) @@ -92,10 +80,16 @@ class ExecutorTest(ReBenchTestCase): def _basic_execution(self, cnf): runs = cnf.get_runs() self.assertEqual(8, len(runs)) - ex = Executor(cnf.get_runs(), cnf.use_nice, cnf.do_builds, TestDummyUI()) + + runs = cnf.get_runs() + persistence = TestPersistence() + for run in runs: + run.add_persistence(persistence) + + ex = Executor(runs, cnf.use_nice, cnf.do_builds, TestDummyUI()) ex.execute() for run in runs: - data_points = run.get_data_points() + data_points = persistence.get_data_points(run) self.assertEqual(10, len(data_points)) for data_point in data_points: measurements = data_point.get_measurements() diff --git a/rebench/tests/features/issue_15_warm_up_support_test.py b/rebench/tests/features/issue_15_warm_up_support_test.py index af7e26a..68c2043 100644 --- 
a/rebench/tests/features/issue_15_warm_up_support_test.py +++ b/rebench/tests/features/issue_15_warm_up_support_test.py @@ -57,4 +57,3 @@ class Issue15WarmUpSupportTest(ReBenchTestCase): ex.execute() self.assertEqual(runs[0].get_number_of_data_points(), 10) - self.assertLessEqual(runs[0].get_total_values()[0], 850) diff --git a/rebench/tests/features/issue_16_multiple_data_points_test.py b/rebench/tests/features/issue_16_multiple_data_points_test.py index b9e55c7..c672930 100644 --- a/rebench/tests/features/issue_16_multiple_data_points_test.py +++ b/rebench/tests/features/issue_16_multiple_data_points_test.py @@ -20,6 +20,7 @@ from ...configurator import Configurator, load_config from ...executor import Executor from ...persistence import DataStore +from ..persistence import TestPersistence from ..rebench_test_case import ReBenchTestCase @@ -38,12 +39,16 @@ class Issue16MultipleDataPointsTest(ReBenchTestCase): cnf = Configurator(load_config(self._path + '/issue_16.conf'), DataStore(self._ui), self._ui, exp_name=exp_name, data_file=self._tmp_file) + runs = cnf.get_runs() + persistence = TestPersistence() + for run in runs: + run.add_persistence(persistence) ex = Executor(cnf.get_runs(), False, False, self._ui) ex.execute() self.assertEqual(1, len(cnf.get_runs())) run = next(iter(cnf.get_runs())) - self.assertEqual(num_data_points, len(run.get_data_points())) - return run.get_data_points() + self.assertEqual(num_data_points, run.get_number_of_data_points()) + return persistence.get_data_points() def test_records_multiple_data_points_from_single_execution_10(self): self._records_data_points('Test1', 10) diff --git a/rebench/tests/features/issue_31_multivariate_data_points_test.py b/rebench/tests/features/issue_31_multivariate_data_points_test.py index 8359fbc..72e9420 100644 --- a/rebench/tests/features/issue_31_multivariate_data_points_test.py +++ b/rebench/tests/features/issue_31_multivariate_data_points_test.py @@ -22,6 +22,7 @@ from __future__ import 
print_function from ...configurator import Configurator, load_config from ...executor import Executor from ...persistence import DataStore +from ..persistence import TestPersistence from ..rebench_test_case import ReBenchTestCase @@ -38,12 +39,17 @@ class Issue31MultivariateDataPointsTest(ReBenchTestCase): cnf = Configurator(load_config(self._path + '/issue_31.conf'), DataStore(self._ui), self._ui, exp_name=exp_name, data_file=self._tmp_file) - ex = Executor(cnf.get_runs(), False, False, self._ui) + runs = cnf.get_runs() + persistence = TestPersistence() + for run in runs: + run.add_persistence(persistence) + + ex = Executor(runs, False, False, self._ui) ex.execute() self.assertEqual(1, len(cnf.get_runs())) run = next(iter(cnf.get_runs())) - self.assertEqual(num_data_points, len(run.get_data_points())) - return run.get_data_points() + self.assertEqual(num_data_points, run.get_number_of_data_points()) + return persistence.get_data_points() def test_records_multiple_data_points_from_single_execution_10(self): self._records_data_points('Test1', 10) diff --git a/rebench/tests/persistence.py b/rebench/tests/persistence.py new file mode 100644 index 0000000..5675014 --- /dev/null +++ b/rebench/tests/persistence.py @@ -0,0 +1,15 @@ +class TestPersistence(object): + + def __init__(self): + self._data_points = [] + + def get_data_points(self, run_id=None): + if run_id: + return [dp for dp in self._data_points if dp.run_id is run_id] + return self._data_points + + def persist_data_point(self, data_point): + self._data_points.append(data_point) + + def close(self): + pass diff --git a/rebench/tests/persistency_test.py b/rebench/tests/persistency_test.py index 9f8a72c..2e6d373 100644 --- a/rebench/tests/persistency_test.py +++ b/rebench/tests/persistency_test.py @@ -66,11 +66,11 @@ class PersistencyTest(ReBenchTestCase): self.assertEqual(num_invocations, run.completed_invocations) def test_iteration_invocation_semantics(self): - ## Executes first time + # Executes first time 
ds = DataStore(self._ui) cnf = Configurator(load_config(self._path + '/persistency.conf'), ds, self._ui, data_file=self._tmp_file) - ds.load_data() + ds.load_data(None, False) self._assert_runs(cnf, 1, 0, 0) @@ -79,12 +79,12 @@ class PersistencyTest(ReBenchTestCase): self._assert_runs(cnf, 1, 10, 10) - ## Execute a second time, should not add any data points, - ## because goal is already reached + # Execute a second time, should not add any data points, + # because goal is already reached ds2 = DataStore(self._ui) cnf2 = Configurator(load_config(self._path + '/persistency.conf'), ds2, self._ui, data_file=self._tmp_file) - ds2.load_data() + ds2.load_data(None, False) self._assert_runs(cnf2, 1, 10, 10) @@ -92,3 +92,31 @@ class PersistencyTest(ReBenchTestCase): ex2.execute() self._assert_runs(cnf2, 1, 10, 10) + + def test_data_discarding(self): + # Executes first time + ds = DataStore(self._ui) + cnf = Configurator(load_config(self._path + '/persistency.conf'), + ds, self._ui, data_file=self._tmp_file) + ds.load_data(None, False) + + self._assert_runs(cnf, 1, 0, 0) + + ex = Executor(cnf.get_runs(), False, False, self._ui) + ex.execute() + + self._assert_runs(cnf, 1, 10, 10) + + # Execute a second time, this time, discard the data first, and then rerun + ds2 = DataStore(self._ui) + cnf2 = Configurator(load_config(self._path + '/persistency.conf'), + ds2, self._ui, data_file=self._tmp_file) + run2 = cnf2.get_runs() + ds2.load_data(run2, True) + + self._assert_runs(cnf2, 1, 0, 0) + + ex2 = Executor(run2, False, False, self._ui) + ex2.execute() + + self._assert_runs(cnf2, 1, 10, 10) diff --git a/rebench/tests/stats_test.py b/rebench/tests/stats_test.py index d233076..2ca8471 100644 --- a/rebench/tests/stats_test.py +++ b/rebench/tests/stats_test.py @@ -18,20 +18,8 @@ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. - -## This code is inspired by https://code.google.com/p/python-statlib - import unittest -from .. 
import statistics as stats - -NUMPY_AND_SCIPY_AVAILABLE = False -try: - import numpy - import scipy.stats - # import scipy.stats.distributions -- not yet implemented - NUMPY_AND_SCIPY_AVAILABLE = True -except ImportError: - NUMPY_AND_SCIPY_AVAILABLE = False +from ..statistics import StatisticProperties class StatsTest(unittest.TestCase): @@ -44,136 +32,39 @@ class StatsTest(unittest.TestCase): self._mixed = [x if x % 2 == 0 else float(x) + 4.5 for x in self._integers] - def test_mean_simple(self): - self.assertEqual(2, stats.mean([1, 2, 3])) - self.assertAlmostEqual(2, stats.mean([1.0, 2.0, 3.0])) - - self.assertAlmostEqual(25, stats.mean(self._integers)) - self.assertAlmostEqual(25, stats.mean(self._floats)) - self.assertAlmostEqual(25 + 2.31, stats.mean(self._floats2)) - self.assertAlmostEqual(27.295918367, stats.mean(self._mixed)) - - @unittest.skipUnless(NUMPY_AND_SCIPY_AVAILABLE, - "NumPy or SciPy is not available") - def test_mean_vs_numpy(self): - self.assertEqual(numpy.mean([1, 2, 3]), - stats.mean([1, 2, 3])) - self.assertAlmostEqual(numpy.mean([1.0, 2.0, 3.0]), - stats.mean([1.0, 2.0, 3.0])) - - self.assertAlmostEqual(numpy.mean(self._integers), - stats.mean(self._integers)) - - self.assertAlmostEqual(numpy.mean(self._floats), - stats.mean(self._floats)) - - self.assertAlmostEqual(numpy.mean(self._floats2), - stats.mean(self._floats2)) - - self.assertAlmostEqual(numpy.mean(self._mixed), - stats.mean(self._mixed)) - - def test_median_simple(self): - self.assertEqual(2.5, stats.median([1, 2, 3, 4])) - self.assertAlmostEqual(2.5, stats.median([1.0, 2.0, 3.0, 4.0])) - - self.assertAlmostEqual(25, stats.median(self._integers)) - self.assertAlmostEqual(25, stats.median(self._floats)) - self.assertAlmostEqual(25 + 2.31, stats.median(self._floats2)) - self.assertAlmostEqual(27.5, stats.median(self._mixed)) - - @unittest.skipUnless(NUMPY_AND_SCIPY_AVAILABLE, - "NumPy or SciPy is not available") - def test_median_vs_numpy(self): - self.assertEqual(numpy.median([1, 
2, 3, 4]), - stats.median([1, 2, 3, 4])) - self.assertAlmostEqual(numpy.median([1.0, 2.0, 3.0, 4.0]), - stats.median([1.0, 2.0, 3.0, 4.0])) - - self.assertAlmostEqual(numpy.median(self._integers), - stats.median(self._integers)) - - self.assertAlmostEqual(numpy.median(self._floats), - stats.median(self._floats)) - - self.assertAlmostEqual(numpy.median(self._floats2), - stats.median(self._floats2)) - - self.assertAlmostEqual(numpy.median(self._mixed), - stats.median(self._mixed)) - - def test_geomean_simple(self): - self.assertAlmostEqual(1.817120592, stats.geo_mean([1, 2, 3])) - self.assertAlmostEqual(1.817120592, stats.geo_mean([1.0, 2.0, 3.0])) - - self.assertAlmostEqual(19.112093553, stats.geo_mean(self._integers)) - self.assertAlmostEqual(19.112093553, stats.geo_mean(self._floats)) - self.assertAlmostEqual(22.533409416, stats.geo_mean(self._floats2)) - self.assertAlmostEqual(22.245044799, stats.geo_mean(self._mixed)) - - @unittest.skipUnless(NUMPY_AND_SCIPY_AVAILABLE, - "NumPy or SciPy is not available") - def test_geomean_vs_scipy(self): - self.assertAlmostEqual(scipy.stats.gmean([1, 2, 3]), - stats.geo_mean([1, 2, 3])) - self.assertAlmostEqual(scipy.stats.gmean([1.0, 2.0, 3.0]), - stats.geo_mean([1.0, 2.0, 3.0])) - - self.assertAlmostEqual(scipy.stats.gmean(self._integers), - stats.geo_mean(self._integers)) - - self.assertAlmostEqual(scipy.stats.gmean(self._floats), - stats.geo_mean(self._floats)) - - self.assertAlmostEqual(scipy.stats.gmean(self._floats2), - stats.geo_mean(self._floats2)) - - self.assertAlmostEqual(scipy.stats.gmean(self._mixed), - stats.geo_mean(self._mixed)) - - def test_stddev_simple(self): - self.assertAlmostEqual(0.8164965809, - stats.stddev([1, 2, 3])) - self.assertAlmostEqual(0.8164965809, - stats.stddev([1.0, 2.0, 3.0])) - - self.assertAlmostEqual(14.142135623, - stats.stddev(self._integers)) - - self.assertAlmostEqual(14.142135623, - stats.stddev(self._floats)) - - self.assertAlmostEqual(14.142135623, - stats.stddev(self._floats2)) 
- - self.assertAlmostEqual(14.319929870, - stats.stddev(self._mixed)) - - @unittest.skipUnless(NUMPY_AND_SCIPY_AVAILABLE, - "NumPy or SciPy is not available") - def test_stddev_vs_numpy(self): - self.assertAlmostEqual(numpy.std([1, 2, 3]), - stats.stddev([1, 2, 3])) - self.assertAlmostEqual(numpy.std([1.0, 2.0, 3.0]), - stats.stddev([1.0, 2.0, 3.0])) - - self.assertAlmostEqual(numpy.std(self._integers), - stats.stddev(self._integers)) - - self.assertAlmostEqual(numpy.std(self._floats), - stats.stddev(self._floats)) - - self.assertAlmostEqual(numpy.std(self._floats2), - stats.stddev(self._floats2)) - - self.assertAlmostEqual(numpy.std(self._mixed), - stats.stddev(self._mixed)) - -# TODO: implement -# @unittest.skipUnless(are_numpy_and_scipy_available, "NumPy or SciPy is not available") -# def test_norm_distribution(self): -# self.fail("not yet implemented") -# -# @unittest.skipUnless(are_numpy_and_scipy_available, "NumPy or SciPy is not available") -# def test_t_distribution(self): -# self.fail("not yet implemented") + def _assert(self, stats, mean_val, geo_mean, min_val, max_val, std_dev): + self.assertAlmostEqual(mean_val, stats.mean) + self.assertAlmostEqual(geo_mean, stats.geom_mean) + + self.assertEqual(min_val, stats.min) + self.assertEqual(max_val, stats.max) + self.assertAlmostEqual(std_dev, stats.std_dev) + + def test_123(self): + stats = StatisticProperties() + stats.add([1, 2, 3]) + self._assert(stats, 2, 1.817120592, 1, 3, 0.816496580927726) + + stats = StatisticProperties() + stats.add([1.0, 2.0, 3.0]) + self._assert(stats, 2, 1.817120592, 1.0, 3.0, 0.816496580927726) + + def test_1to49(self): + stats = StatisticProperties() + stats.add(self._integers) + self._assert(stats, 25, 19.112093553, 1, 49, 14.142135623730951) + + stats = StatisticProperties() + stats.add(self._floats) + self._assert(stats, 25, 19.112093553, 1.0, 49.0, 14.142135623730951) + + def test_shifted(self): + stats = StatisticProperties() + stats.add(self._floats2) + self._assert(stats, 
25 + 2.31, 22.533409416, 1.0 + 2.31, 49.0 + 2.31, 14.142135623730951) + + def test_mixed(self): + stats = StatisticProperties() + stats.add(self._mixed) + self.assertAlmostEqual(27.295918367, stats.mean) + self._assert(stats, 27.295918367, 22.245044799, 2, 53.5, 14.319929870761944)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 9 }
0.10
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y cpuset" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
docopt==0.6.2 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work humanfriendly==10.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work nose==1.3.7 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pykwalify==1.8.0 pytest @ file:///croot/pytest_1738938843180/work python-dateutil==2.9.0.post0 PyYAML==6.0.2 -e git+https://github.com/smarr/ReBench.git@4b105d255840335febcfcf6fd22c73c65ae1bff3#egg=ReBench ruamel.yaml==0.18.10 ruamel.yaml.clib==0.2.12 six==1.17.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: ReBench channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - docopt==0.6.2 - humanfriendly==10.0 - nose==1.3.7 - pykwalify==1.8.0 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - ruamel-yaml==0.18.10 - ruamel-yaml-clib==0.2.12 - six==1.17.0 prefix: /opt/conda/envs/ReBench
[ "rebench/tests/stats_test.py::StatsTest::test_123", "rebench/tests/stats_test.py::StatsTest::test_1to49", "rebench/tests/stats_test.py::StatsTest::test_mixed", "rebench/tests/stats_test.py::StatsTest::test_shifted" ]
[ "rebench/tests/executor_test.py::ExecutorTest::test_basic_execution", "rebench/tests/executor_test.py::ExecutorTest::test_basic_execution_with_magic_all", "rebench/tests/executor_test.py::ExecutorTest::test_broken_command_format_with_TypeError", "rebench/tests/executor_test.py::ExecutorTest::test_broken_command_format_with_ValueError", "rebench/tests/executor_test.py::ExecutorTest::test_execution_with_invocation_and_iteration_set", "rebench/tests/executor_test.py::ExecutorTest::test_execution_with_quick_set", "rebench/tests/executor_test.py::ExecutorTest::test_setup_and_run_benchmark", "rebench/tests/features/issue_15_warm_up_support_test.py::Issue15WarmUpSupportTest::test_run_id_indicates_warm_up_iterations_required", "rebench/tests/features/issue_15_warm_up_support_test.py::Issue15WarmUpSupportTest::test_warm_up_results_should_be_ignored", "rebench/tests/features/issue_16_multiple_data_points_test.py::Issue16MultipleDataPointsTest::test_associates_measurements_and_data_points_correctly", "rebench/tests/features/issue_16_multiple_data_points_test.py::Issue16MultipleDataPointsTest::test_records_multiple_data_points_from_single_execution_10", "rebench/tests/features/issue_16_multiple_data_points_test.py::Issue16MultipleDataPointsTest::test_records_multiple_data_points_from_single_execution_20", "rebench/tests/features/issue_31_multivariate_data_points_test.py::Issue31MultivariateDataPointsTest::test_associates_measurements_and_data_points_correctly", "rebench/tests/features/issue_31_multivariate_data_points_test.py::Issue31MultivariateDataPointsTest::test_is_compatible_to_issue16_format", "rebench/tests/features/issue_31_multivariate_data_points_test.py::Issue31MultivariateDataPointsTest::test_records_multiple_data_points_from_single_execution_10", "rebench/tests/features/issue_31_multivariate_data_points_test.py::Issue31MultivariateDataPointsTest::test_records_multiple_data_points_from_single_execution_20", 
"rebench/tests/features/issue_31_multivariate_data_points_test.py::Issue31MultivariateDataPointsTest::test_records_multiple_data_points_from_single_execution_30", "rebench/tests/persistency_test.py::PersistencyTest::test_data_discarding", "rebench/tests/persistency_test.py::PersistencyTest::test_iteration_invocation_semantics" ]
[ "rebench/tests/executor_test.py::ExecutorTest::test_determine_exp_name_and_filters_all", "rebench/tests/executor_test.py::ExecutorTest::test_determine_exp_name_and_filters_all_and_other", "rebench/tests/executor_test.py::ExecutorTest::test_determine_exp_name_and_filters_empty", "rebench/tests/executor_test.py::ExecutorTest::test_determine_exp_name_and_filters_only_others", "rebench/tests/executor_test.py::ExecutorTest::test_determine_exp_name_and_filters_some_name", "rebench/tests/executor_test.py::ExecutorTest::test_shell_options_with_executor_filter", "rebench/tests/executor_test.py::ExecutorTest::test_shell_options_with_filters", "rebench/tests/executor_test.py::ExecutorTest::test_shell_options_without_filters", "rebench/tests/executor_test.py::test_suite", "rebench/tests/persistency_test.py::PersistencyTest::test_de_serialization" ]
[]
MIT License
2,844
[ "rebench/statistics.py", "rebench/executor.py", "rebench/persistence.py", "setup.py", "rebench/model/run_id.py", ".travis.yml", "rebench/reporter.py", "rebench/rebench.py", "rebench/model/data_point.py" ]
[ "rebench/statistics.py", "rebench/executor.py", "rebench/persistence.py", "setup.py", "rebench/model/run_id.py", ".travis.yml", "rebench/reporter.py", "rebench/rebench.py", "rebench/model/data_point.py" ]
NeurodataWithoutBorders__pynwb-575
432699f83ecb4747943a6c3bfd052b9cd5d2d357
2018-07-30 20:04:55
6c64d3037141db1426fdd70ddbf91d792517c83c
diff --git a/src/pynwb/form/build/map.py b/src/pynwb/form/build/map.py index 5efe6b8e..3de93323 100644 --- a/src/pynwb/form/build/map.py +++ b/src/pynwb/form/build/map.py @@ -15,6 +15,16 @@ from .warnings import OrphanContainerWarning, MissingRequiredWarning class Proxy(object): + """ + A temporary object to represent a Container. This gets used when resolving the true location of a + Container's parent. + + Proxy objects allow simple bookeeping of all potential parents a Container may have. + + This object is used by providing all the necessary information for describing the object. This object + gets passed around and candidates are accumulated. Upon calling resolve, all saved candidates are matched + against the information (provided to the constructor). The candidate that has an exact match is returned. + """ def __init__(self, manager, source, location, namespace, data_type): self.__source = source @@ -24,24 +34,24 @@ class Proxy(object): self.__manager = manager self.__candidates = list() - @property - def candidates(self): - return self.__candidates - @property def source(self): + """The source of the object e.g. file source""" return self.__source @property def location(self): + """The location of the object. 
This can be thought of as a unique path""" return self.__location @property def namespace(self): + """The namespace from which the data_type of this Proxy came from""" return self.__namespace @property def data_type(self): + """The data_type of Container that should match this Proxy""" return self.__data_type @docval({"name": "object", "type": (BaseBuilder, Container), "doc": "the container or builder to get a proxy for"}) @@ -115,8 +125,12 @@ class BuildManager(object): stack = list() tmp = container while tmp is not None: - stack.append(tmp.name) - tmp = tmp.parent + if isinstance(tmp, Proxy): + stack.append(tmp.location) + break + else: + stack.append(tmp.name) + tmp = tmp.parent loc = "/".join(reversed(stack)) return Proxy(self, container.container_source, loc, ns, dt) @@ -523,6 +537,13 @@ class ObjectMapper(with_metaclass(ExtenderMeta, object)): return None attr_val = self.__get_override_attr(attr_name, container, manager) if attr_val is None: + # TODO: A message like this should be used to warn users when an expected attribute + # does not exist on a Container object + # + # if not hasattr(container, attr_name): + # msg = "Container '%s' (%s) does not have attribute '%s'" \ + # % (container.name, type(container), attr_name) + # #warnings.warn(msg) attr_val = getattr(container, attr_name, None) if attr_val is not None: attr_val = self.__convert_value(attr_val, spec)
FilteredEphys error on read ```python from pynwb import NWBFile, NWBHDF5IO from pynwb.ecephys import FilteredEphys, ElectricalSeries import numpy as np nwbfile = NWBFile("source", "a file with header data", "NB123A", '2018-06-01T00:00:00') module_lfp = nwbfile.create_processing_module( 'lfp', source='source', description='source') filt_ephys = FilteredEphys(source='source', name='name') device = nwbfile.create_device('device_name', 'source') electrode_group = nwbfile.create_electrode_group( name='electrode_group_name', source='source', description='desc', device=device, location='unknown') nwbfile.add_electrode(0, np.nan, np.nan, np.nan, # position? imp=np.nan, location='unknown', filtering='unknown', description='desc', group=electrode_group) etr = nwbfile.create_electrode_table_region([0], 'etr_name') for passband in ('theta', 'gamma'): electrical_series = ElectricalSeries(name=passband + '_phase', source='ephys_analysis', data=[1., 2., 3.], rate=0.0, electrodes=etr) filt_ephys.add_electrical_series(electrical_series) module_lfp.add_container(filt_ephys) with NWBHDF5IO('test.nwb', 'w') as io: io.write(nwbfile) with NWBHDF5IO('test.nwb', 'r') as io: io.read() ``` error: ```python Traceback (most recent call last): File "/Users/bendichter/dev/to_nwb/test.py", line 48, in <module> io.read() File "/Users/bendichter/dev/pynwb/src/pynwb/form/utils.py", line 330, in func_call return func(self, **parsed['args']) File "/Users/bendichter/dev/pynwb/src/pynwb/form/backends/io.py", line 33, in read container = self.__manager.construct(f_builder) File "/Users/bendichter/dev/pynwb/src/pynwb/form/utils.py", line 330, in func_call return func(self, **parsed['args']) File "/Users/bendichter/dev/pynwb/src/pynwb/form/build/map.py", line 178, in construct self.__resolve_parents(result) File "/Users/bendichter/dev/pynwb/src/pynwb/form/build/map.py", line 188, in __resolve_parents tmp.parent = tmp.parent.resolve() File "/Users/bendichter/dev/pynwb/src/pynwb/form/build/map.py", line 60, 
in resolve if self.matches(candidate): File "/Users/bendichter/dev/pynwb/src/pynwb/form/utils.py", line 330, in func_call return func(self, **parsed['args']) File "/Users/bendichter/dev/pynwb/src/pynwb/form/build/map.py", line 50, in matches obj = self.__manager.get_proxy(obj) File "/Users/bendichter/dev/pynwb/src/pynwb/form/utils.py", line 330, in func_call return func(self, **parsed['args']) File "/Users/bendichter/dev/pynwb/src/pynwb/form/build/map.py", line 99, in get_proxy return self.__get_proxy_container(obj) File "/Users/bendichter/dev/pynwb/src/pynwb/form/build/map.py", line 117, in __get_proxy_container stack.append(tmp.name) AttributeError: 'Proxy' object has no attribute 'name' ``` if you take out `module_lfp.add_container(filt_ephys)` it runs as expected.
NeurodataWithoutBorders/pynwb
diff --git a/tests/unit/form_tests/test_io_hdf5_h5tools.py b/tests/unit/form_tests/test_io_hdf5_h5tools.py index d927f00b..4e126466 100644 --- a/tests/unit/form_tests/test_io_hdf5_h5tools.py +++ b/tests/unit/form_tests/test_io_hdf5_h5tools.py @@ -12,6 +12,8 @@ from pynwb.base import TimeSeries from pynwb import NWBHDF5IO from pynwb.spec import NWBNamespace, NWBGroupSpec, NWBDatasetSpec +from pynwb.ecephys import ElectricalSeries + import tempfile import warnings @@ -400,6 +402,47 @@ class TestCacheSpec(unittest.TestCase): return types +class TestLinkResolution(unittest.TestCase): + + def test_link_resolve(self): + print("TEST_LINK_RESOLVE") + + nwbfile = NWBFile("source", "a file with header data", "NB123A", '2018-06-01T00:00:00') + device = nwbfile.create_device('device_name', 'source') + electrode_group = nwbfile.create_electrode_group( + name='electrode_group_name', + source='source', + description='desc', + device=device, + location='unknown') + nwbfile.add_electrode(0, + 1.0, 2.0, 3.0, # position? + imp=2.718, + location='unknown', + filtering='unknown', + description='desc', + group=electrode_group) + etr = nwbfile.create_electrode_table_region([0], 'etr_name') + for passband in ('theta', 'gamma'): + electrical_series = ElectricalSeries(name=passband + '_phase', + source='ephys_analysis', + data=[1., 2., 3.], + rate=0.0, + electrodes=etr) + nwbfile.add_acquisition(electrical_series) + with NWBHDF5IO(self.path, 'w') as io: + io.write(nwbfile) + with NWBHDF5IO(self.path, 'r') as io: + io.read() + + def setUp(self): + self.path = "test_link_resolve.nwb" + + def tearDown(self): + if os.path.exists(self.path): + os.remove(self.path) + + class NWBHDF5IOMultiFileTest(unittest.TestCase): """Tests for h5tools IO tools"""
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2018.1.18 chardet==3.0.4 h5py==2.7.1 idna==2.6 importlib-metadata==4.8.3 iniconfig==1.1.1 numpy==1.14.2 packaging==21.3 pandas==0.19.2 pluggy==1.0.0 py==1.11.0 -e git+https://github.com/NeurodataWithoutBorders/pynwb.git@432699f83ecb4747943a6c3bfd052b9cd5d2d357#egg=pynwb pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.7.2 pytz==2025.2 requests==2.18.4 ruamel.yaml==0.15.37 six==1.11.0 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.22 zipp==3.6.0
name: pynwb channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - certifi==2018.1.18 - chardet==3.0.4 - h5py==2.7.1 - idna==2.6 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - numpy==1.14.2 - packaging==21.3 - pandas==0.19.2 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.7.2 - pytz==2025.2 - requests==2.18.4 - ruamel-yaml==0.15.37 - six==1.11.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.22 - zipp==3.6.0 prefix: /opt/conda/envs/pynwb
[ "tests/unit/form_tests/test_io_hdf5_h5tools.py::TestLinkResolution::test_link_resolve" ]
[]
[ "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test__chunked_iter_fill_iterator_matched_buffer_size", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test__chunked_iter_fill_iterator_unmatched_buffer_size", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test__chunked_iter_fill_list_matched_buffer_size", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test__chunked_iter_fill_numpy_matched_buffer_size", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test__chunked_iter_fill_numpy_unmatched_buffer_size", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_copy_h5py_dataset_h5dataio_input", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_copy_h5py_dataset_input", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_link_h5py_dataset_h5dataio_input", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_link_h5py_dataset_input", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_pass_through_of_recommended_chunks", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_warning_on_linking_of_regular_array", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_warning_on_non_gzip_compression", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_warning_on_setting_io_options_on_h5dataset_input", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_data_chunk_iterator", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_data_chunk_iterator_with_compression", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_iterable", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_iterable_multidimensional_array", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_iterable_multidimensional_array_compression", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list", 
"tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_chunked", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_compress", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_disable_default_compress", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_enable_default_compress", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_list_fillvalue", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_scalar", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_dataset_string", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_table", "tests/unit/form_tests/test_io_hdf5_h5tools.py::H5IOTest::test_write_table_nested", "tests/unit/form_tests/test_io_hdf5_h5tools.py::TestCacheSpec::test_cache_spec", "tests/unit/form_tests/test_io_hdf5_h5tools.py::NWBHDF5IOMultiFileTest::test_copy_file_with_external_links" ]
[]
BSD-3-Clause
2,845
[ "src/pynwb/form/build/map.py" ]
[ "src/pynwb/form/build/map.py" ]
elastic__rally-544
7387ca228ec1f0f7e6dc5f4938cfc62dbc4c5cba
2018-07-31 06:45:19
799e0642c27a0067931f305359a615cbf9fe2e20
diff --git a/esrally/metrics.py b/esrally/metrics.py index c01be1c2..7da700de 100644 --- a/esrally/metrics.py +++ b/esrally/metrics.py @@ -9,7 +9,7 @@ from enum import Enum, IntEnum import tabulate from esrally import time, exceptions, config, version, paths -from esrally.utils import console, io, versions +from esrally.utils import convert, console, io, versions from http.client import responses @@ -123,8 +123,7 @@ class EsClientFactory: self._config = cfg host = self._config.opts("reporting", "datastore.host") port = self._config.opts("reporting", "datastore.port") - # poor man's boolean conversion - secure = self._config.opts("reporting", "datastore.secure") == "True" + secure = convert.to_bool(self._config.opts("reporting", "datastore.secure")) user = self._config.opts("reporting", "datastore.user") password = self._config.opts("reporting", "datastore.password") verify = self._config.opts("reporting", "datastore.ssl.verification_mode", default_value="full", mandatory=False) != "none"
datastore.secure boolean values seem to be case sensitive in rally.ini Specifying `datastore.secure = true` in `rally.ini` gives the misleading error message: ``` esrally.exceptions.SystemSetupError: ('Could not connect to your Elasticsearch metrics store. Please check that it is running on host [https://<url>] at port [9243] or fix the configuration in [<home_dir>/.rally/rally.ini].', None) ]. ``` It works when setting it to `True`, but we should be case insensitive when parsing boolean values in this file.
elastic/rally
diff --git a/tests/metrics_test.py b/tests/metrics_test.py index ee646c45..68413848 100644 --- a/tests/metrics_test.py +++ b/tests/metrics_test.py @@ -3,6 +3,7 @@ import datetime import logging import unittest.mock as mock import random +import string from unittest import TestCase import elasticsearch.exceptions @@ -120,6 +121,39 @@ class EsClientTests(TestCase): def __init__(self, hosts): self.transport = EsClientTests.TransportMock(hosts) + @mock.patch("esrally.client.EsClientFactory") + def test_config_opts_parsing(self, client_esclientfactory): + cfg = config.Config() + + _datastore_host = ".".join([str(random.randint(1,254)) for _ in range(4)]) + _datastore_port = random.randint(1024,65535) + _datastore_secure = random.choice(["True", "true"]) + _datastore_user = "".join([random.choice(string.ascii_letters) for _ in range(8)]) + _datastore_password = "".join([random.choice(string.ascii_letters + string.digits + "_-@#$/") for _ in range(12)]) + _datastore_verify_certs = random.choice([True, False]) + + cfg.add(config.Scope.applicationOverride, "reporting", "datastore.host", _datastore_host) + cfg.add(config.Scope.applicationOverride, "reporting", "datastore.port", _datastore_port) + cfg.add(config.Scope.applicationOverride, "reporting", "datastore.secure", _datastore_secure) + cfg.add(config.Scope.applicationOverride, "reporting", "datastore.user", _datastore_user) + cfg.add(config.Scope.applicationOverride, "reporting", "datastore.password", _datastore_password) + if not _datastore_verify_certs: + cfg.add(config.Scope.applicationOverride, "reporting", "datastore.ssl.verification_mode", "none") + + f = metrics.EsClientFactory(cfg) + expected_client_options = { + "use_ssl": True, + "timeout": 120, + "basic_auth_user": _datastore_user, + "basic_auth_password": _datastore_password, + "verify_certs": _datastore_verify_certs + } + + client_esclientfactory.assert_called_with( + hosts=[{"host": _datastore_host, "port": _datastore_port}], + 
client_options=expected_client_options + ) + def test_raises_sytem_setup_error_on_connection_problems(self): def raise_connection_error(): raise elasticsearch.exceptions.ConnectionError("unit-test")
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-benchmark" ], "pre_install": [ "apt-get update", "apt-get install -y gcc python3-pip python3-dev" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 elasticsearch==6.2.0 -e git+https://github.com/elastic/rally.git@7387ca228ec1f0f7e6dc5f4938cfc62dbc4c5cba#egg=esrally importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==2.9.5 jsonschema==2.5.1 MarkupSafe==2.0.1 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==5.4.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work py-cpuinfo==3.2.0 pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-benchmark==3.4.1 tabulate==0.8.1 thespian==3.9.2 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.22 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: rally channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - elasticsearch==6.2.0 - jinja2==2.9.5 - jsonschema==2.5.1 - markupsafe==2.0.1 - psutil==5.4.0 - py-cpuinfo==3.2.0 - pytest-benchmark==3.4.1 - tabulate==0.8.1 - thespian==3.9.2 - urllib3==1.22 prefix: /opt/conda/envs/rally
[ "tests/metrics_test.py::EsClientTests::test_config_opts_parsing" ]
[]
[ "tests/metrics_test.py::ExtractUserTagsTests::test_extracts_proper_user_tags", "tests/metrics_test.py::ExtractUserTagsTests::test_missing_comma_raises_error", "tests/metrics_test.py::ExtractUserTagsTests::test_missing_value_raises_error", "tests/metrics_test.py::ExtractUserTagsTests::test_no_tags_returns_empty_dict", "tests/metrics_test.py::EsClientTests::test_fails_after_too_many_errors", "tests/metrics_test.py::EsClientTests::test_raises_rally_error_on_unknown_problems", "tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_authentication_problems", "tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_authorization_problems", "tests/metrics_test.py::EsClientTests::test_raises_sytem_setup_error_on_connection_problems", "tests/metrics_test.py::EsClientTests::test_retries_on_various_transport_errors", "tests/metrics_test.py::EsMetricsTests::test_get_error_rate_additional_unknown_key", "tests/metrics_test.py::EsMetricsTests::test_get_error_rate_explicit_one", "tests/metrics_test.py::EsMetricsTests::test_get_error_rate_explicit_zero", "tests/metrics_test.py::EsMetricsTests::test_get_error_rate_implicit_one", "tests/metrics_test.py::EsMetricsTests::test_get_error_rate_implicit_zero", "tests/metrics_test.py::EsMetricsTests::test_get_error_rate_mixed", "tests/metrics_test.py::EsMetricsTests::test_get_median", "tests/metrics_test.py::EsMetricsTests::test_get_value", "tests/metrics_test.py::EsMetricsTests::test_put_doc_no_meta_data", "tests/metrics_test.py::EsMetricsTests::test_put_doc_with_metadata", "tests/metrics_test.py::EsMetricsTests::test_put_value_with_explicit_timestamps", "tests/metrics_test.py::EsMetricsTests::test_put_value_with_meta_info", "tests/metrics_test.py::EsMetricsTests::test_put_value_without_meta_info", "tests/metrics_test.py::EsRaceStoreTests::test_store_race", "tests/metrics_test.py::EsResultsStoreTests::test_store_results", "tests/metrics_test.py::InMemoryMetricsStoreTests::test_externalize_and_bulk_add", 
"tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_by_sample_type", "tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_mixed", "tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_error_rate_zero_without_samples", "tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_median", "tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_percentile", "tests/metrics_test.py::InMemoryMetricsStoreTests::test_get_value", "tests/metrics_test.py::InMemoryMetricsStoreTests::test_meta_data_per_document", "tests/metrics_test.py::FileRaceStoreTests::test_store_race" ]
[]
Apache License 2.0
2,846
[ "esrally/metrics.py" ]
[ "esrally/metrics.py" ]
joke2k__faker-790
29dff0a0f2a31edac21a18cfa50b5bc9206304b2
2018-07-31 14:58:53
29dff0a0f2a31edac21a18cfa50b5bc9206304b2
diff --git a/faker/providers/address/en_CA/__init__.py b/faker/providers/address/en_CA/__init__.py index 4d8963ed..7e7e989a 100644 --- a/faker/providers/address/en_CA/__init__.py +++ b/faker/providers/address/en_CA/__init__.py @@ -316,7 +316,7 @@ class Provider(AddressProvider): """ return self.random_element(self.postal_code_letters) - def postalcode(self): + def postcode(self): """ Replaces all question mark ('?') occurrences with a random letter from postal_code_formats then passes result to @@ -326,3 +326,6 @@ class Provider(AddressProvider): lambda x: self.postal_code_letter(), self.random_element(self.postal_code_formats)) return self.numerify(temp) + + def postalcode(self): + return self.postcode() diff --git a/faker/providers/address/en_US/__init__.py b/faker/providers/address/en_US/__init__.py index c15c771a..451d5d72 100644 --- a/faker/providers/address/en_US/__init__.py +++ b/faker/providers/address/en_US/__init__.py @@ -332,7 +332,7 @@ class Provider(AddressProvider): def state_abbr(self): return self.random_element(self.states_abbr) - def zipcode(self): + def postcode(self): return "%05d" % self.generator.random.randint(501, 99950) def zipcode_plus4(self): @@ -364,8 +364,11 @@ class Provider(AddressProvider): return self.numerify(self.military_dpo_format) # Aliases + def zipcode(self): + return self.postcode() + def postalcode(self): - return self.zipcode() + return self.postcode() def postalcode_plus4(self): return self.zipcode_plus4() diff --git a/faker/providers/address/ja_JP/__init__.py b/faker/providers/address/ja_JP/__init__.py index 3444e6a1..9d08197f 100644 --- a/faker/providers/address/ja_JP/__init__.py +++ b/faker/providers/address/ja_JP/__init__.py @@ -347,9 +347,12 @@ class Provider(AddressProvider): """ return self.random_element(self.building_names) - def zipcode(self): + def postcode(self): """ :example '101-1212' """ return "%03d-%04d" % (self.generator.random.randint(0, 999), self.generator.random.randint(0, 9999)) + + def 
zipcode(self): + return self.postcode() diff --git a/faker/providers/address/ko_KR/__init__.py b/faker/providers/address/ko_KR/__init__.py index 0a0ee206..956f7e11 100644 --- a/faker/providers/address/ko_KR/__init__.py +++ b/faker/providers/address/ko_KR/__init__.py @@ -381,8 +381,14 @@ class Provider(AddressProvider): """ return self.bothify(self.random_element(self.postcode_formats)) - def postal_code(self): + def postcode(self): """ :example 12345 """ return self.bothify(self.random_element(self.new_postal_code_formats)) + + def postal_code(self): + """ + :example 12345 + """ + return self.postcode()
en_CA should implement postcode, not postalcode All of the address providers use `postcode` to generate post codes. This is misspelled as `postalcode` by the localization for `en_CA`. ### Expected behavior It would be easier to use faker in a multilingual context if this were spelled consistently. Recommended fix: provide a courtesy alias definition of `postcode` so that CA works the same as everyone else. Perhaps `postalcode` should be deprecated, too. ### Actual behavior ```bash $ grep -r "{{postalcode}}" * | grep -v ".pyc" providers/address/en_CA/__init__.py: "{{street_address}}\n{{city}}, {{province_abbr}} {{postalcode}}", $ grep -r "{{postcode}}" * | grep -v ".pyc" providers/address/fa_IR/__init__.py: "{{street_address}}\n{{city}}, {{state}} {{postcode}}", providers/address/fi_FI/__init__.py: address_formats = ("{{street_address}}\n{{postcode}} {{city}}", ) providers/address/nl_BE/__init__.py: "{{street_address}}\n{{postcode}}\n{{city}}", providers/address/nl_BE/__init__.py: "{{street_address}}\n{{postcode}} {{city}}", providers/address/sk_SK/__init__.py: address_formats = ('{{street_address}}\n{{postcode}} {{city}}', ) providers/address/uk_UA/__init__.py: address_formats = ['{{street_address}}, {{city}}, {{postcode}}'] providers/address/en_US/__init__.py: "{{street_address}}\n{{city}}, {{state_abbr}} {{postcode}}", providers/address/en_US/__init__.py: ("{{street_address}}\n{{city}}, {{state_abbr}} {{postcode}}", 25), providers/address/en_US/__init__.py: ("{{military_apo}}\nAPO {{military_state}} {{postcode}}", 1), providers/address/en_US/__init__.py: ("{{military_ship}} {{last_name}}\nFPO {{military_state}} {{postcode}}", 1), providers/address/en_US/__init__.py: ("{{military_dpo}}\nDPO {{military_state}} {{postcode}}", 1), providers/address/ne_NP/__init__.py: "{{street_name}} {{building_prefix}} {{building_number}} \n{{city}}\n{{district}} {{postcode}}", providers/address/sv_SE/__init__.py: address_formats = ("{{street_address}}\n{{postcode}} {{city}}", ) 
providers/address/id_ID/__init__.py: '{{street_address}}\n{{city}}, {{state}} {{postcode}}', providers/address/id_ID/__init__.py: '{{street_address}}\n{{city}}, {{state_abbr}} {{postcode}}', providers/address/pt_BR/__init__.py: "{{street_address}}\n{{bairro}}\n{{postcode}} {{city}} / {{estado_sigla}}", ) providers/address/it_IT/__init__.py: "{{street_address}}\n{{city}}, {{postcode}} {{state}} ({{state_abbr}})", providers/address/zh_TW/__init__.py: "{{postcode}} {{city}}{{street_address}}{{secondary_address}}", ) providers/address/pt_PT/__init__.py: "{{street_address}}\n{{postcode}} {{city}}", providers/address/hr_HR/__init__.py: address_formats = ('{{street_address}}\n{{postcode}} {{city}}', ) providers/address/cs_CZ/__init__.py: address_formats = ('{{street_address}}\n{{postcode}} {{city}}', ) providers/address/__init__.py: address_formats = ('{{street_address}} {{postcode}} {{city}}', ) providers/address/fr_CH/__init__.py: address_formats = ("{{street_address}}\n{{postcode}} {{city}}",) providers/address/he_IL/__init__.py: address_formats = ('{{street_address}}, {{city}}, {{postcode}}', ) providers/address/hi_IN/__init__.py: address_formats = ('{{street_address}}\n{{city}} {{postcode}}', providers/address/hi_IN/__init__.py: '{{street_address}}\n{{city}}-{{postcode}}',) providers/address/sl_SI/__init__.py: address_formats = ('{{street_address}}\n{{postcode}} {{city}}', ) providers/address/zh_CN/__init__.py: "{{province}}{{city}}{{district}}{{street_address}} {{postcode}}",) providers/address/nl_NL/__init__.py: "{{street_address}}\n{{postcode}}\n{{city}}", providers/address/hu_HU/__init__.py: address_formats = ("{{street_address}}\n{{postcode}} {{city}}",) providers/address/en_AU/__init__.py: "{{street_address}}\n{{city}}, {{state_abbr}}, {{postcode}}", ) providers/address/en_GB/__init__.py: "{{street_address}}\n{{city}}\n{{postcode}}", providers/address/es_MX/__init__.py: "{{street_address}}\n{{city}}, {{state_abbr}} {{postcode}}", 
providers/address/ru_RU/__init__.py: address_formats = ('{{city}}, {{street_address}}, {{postcode}}', ) providers/address/es_ES/__init__.py: "{{street_address}}\n{{city}}, {{postcode}}", providers/address/el_GR/__init__.py: "{{street_address}},\n{{postcode}} {{city}}", providers/address/el_GR/__init__.py: "{{street_address}}, {{postcode}} {{city}}", providers/address/no_NO/__init__.py: address_formats = ('{{street_address}}, {{postcode}} {{city}}',) providers/address/pl_PL/__init__.py: "{{street_address}}\n{{postcode}} {{city}}", providers/address/fr_FR/__init__.py: "{{street_address}}\n{{postcode}} {{city}}", providers/address/de_AT/__init__.py: address_formats = ('{{street_address}}\n{{postcode}} {{city}}', ) providers/address/de_DE/__init__.py: address_formats = ('{{street_address}}\n{{postcode}} {{city}}', ) ```
joke2k/faker
diff --git a/tests/providers/test_address.py b/tests/providers/test_address.py index e7385664..585e0869 100644 --- a/tests/providers/test_address.py +++ b/tests/providers/test_address.py @@ -230,6 +230,12 @@ class TestEnCA(unittest.TestCase): def setUp(self): self.factory = Faker('en_CA') + def test_postcode(self): + for _ in range(100): + postcode = self.factory.postcode() + assert re.match("[A-Z][0-9][A-Z] ?[0-9][A-Z][0-9]", + postcode) + def test_postalcode(self): for _ in range(100): postalcode = self.factory.postalcode() @@ -292,6 +298,11 @@ class TestEnUS(unittest.TestCase): assert isinstance(state_abbr, string_types) assert state_abbr in EnUsProvider.states_abbr + def test_postcode(self): + for _ in range(100): + postcode = self.factory.postcode() + assert re.match("\d{5}", postcode) + def test_zipcode(self): for _ in range(100): zipcode = self.factory.zipcode() @@ -420,6 +431,10 @@ class TestJaJP(unittest.TestCase): assert isinstance(building_name, string_types) assert building_name in JaProvider.building_names + postcode = self.factory.postcode() + assert isinstance(postcode, string_types) + assert re.match("\d{3}-\d{4}", postcode) + zipcode = self.factory.zipcode() assert isinstance(zipcode, string_types) assert re.match("\d{3}-\d{4}", zipcode) @@ -428,6 +443,26 @@ class TestJaJP(unittest.TestCase): assert isinstance(address, string_types) +class TestKoKR(unittest.TestCase): + """ Tests addresses in the ko_KR locale """ + + def setUp(self): + self.factory = Faker('ko_KR') + + def test_address(self): + postcode = self.factory.postcode() + assert isinstance(postcode, string_types) + assert re.match("\d{5}", postcode) + + postal_code = self.factory.postal_code() + assert isinstance(postal_code, string_types) + assert re.match("\d{5}", postal_code) + + old_postal_code = self.factory.old_postal_code() + assert isinstance(old_postal_code, string_types) + assert re.match("\d{3}-\d{3}", old_postal_code) + + class TestNeNP(unittest.TestCase): """ Tests addresses 
in the ne_NP locale """
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 4 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "tests/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 dnspython==2.2.1 email-validator==1.0.3 -e git+https://github.com/joke2k/faker.git@29dff0a0f2a31edac21a18cfa50b5bc9206304b2#egg=Faker idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 mock==2.0.0 packaging==21.3 pbr==6.1.1 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 six==1.17.0 text-unidecode==1.2 tomli==1.2.3 typing_extensions==4.1.1 UkPostcodeParser==1.1.2 zipp==3.6.0
name: faker channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - dnspython==2.2.1 - email-validator==1.0.3 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mock==2.0.0 - packaging==21.3 - pbr==6.1.1 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - six==1.17.0 - text-unidecode==1.2 - tomli==1.2.3 - typing-extensions==4.1.1 - ukpostcodeparser==1.1.2 - zipp==3.6.0 prefix: /opt/conda/envs/faker
[ "tests/providers/test_address.py::TestEnCA::test_postcode", "tests/providers/test_address.py::TestJaJP::test_address", "tests/providers/test_address.py::TestKoKR::test_address" ]
[]
[ "tests/providers/test_address.py::TestBaseProvider::test_alpha_2_country_codes", "tests/providers/test_address.py::TestBaseProvider::test_alpha_2_country_codes_as_default", "tests/providers/test_address.py::TestBaseProvider::test_alpha_3_country_codes", "tests/providers/test_address.py::TestBaseProvider::test_bad_country_code_representation", "tests/providers/test_address.py::TestAr_AA::test_alpha_2_country_codes", "tests/providers/test_address.py::TestAr_AA::test_alpha_2_country_codes_as_default", "tests/providers/test_address.py::TestAr_AA::test_alpha_3_country_codes", "tests/providers/test_address.py::TestAr_AA::test_bad_country_code_representation", "tests/providers/test_address.py::TestDeAT::test_city", "tests/providers/test_address.py::TestDeAT::test_country", "tests/providers/test_address.py::TestDeAT::test_latitude", "tests/providers/test_address.py::TestDeAT::test_longitude", "tests/providers/test_address.py::TestDeAT::test_postcode", "tests/providers/test_address.py::TestDeAT::test_state", "tests/providers/test_address.py::TestDeAT::test_street_suffix_long", "tests/providers/test_address.py::TestDeAT::test_street_suffix_short", "tests/providers/test_address.py::TestDeDE::test_city", "tests/providers/test_address.py::TestDeDE::test_country", "tests/providers/test_address.py::TestDeDE::test_state", "tests/providers/test_address.py::TestDeDE::test_street_suffix_long", "tests/providers/test_address.py::TestDeDE::test_street_suffix_short", "tests/providers/test_address.py::TestFiFI::test_city", "tests/providers/test_address.py::TestFiFI::test_street_suffix", "tests/providers/test_address.py::TestElGR::test_city", "tests/providers/test_address.py::TestElGR::test_latlng", "tests/providers/test_address.py::TestElGR::test_line_address", "tests/providers/test_address.py::TestElGR::test_region", "tests/providers/test_address.py::TestEnAU::test_city_prefix", "tests/providers/test_address.py::TestEnAU::test_postcode", 
"tests/providers/test_address.py::TestEnAU::test_state", "tests/providers/test_address.py::TestEnAU::test_state_abbr", "tests/providers/test_address.py::TestEnCA::test_city_prefix", "tests/providers/test_address.py::TestEnCA::test_postal_code_letter", "tests/providers/test_address.py::TestEnCA::test_postalcode", "tests/providers/test_address.py::TestEnCA::test_province", "tests/providers/test_address.py::TestEnCA::test_province_abbr", "tests/providers/test_address.py::TestEnCA::test_secondary_address", "tests/providers/test_address.py::TestEnGB::test_postcode", "tests/providers/test_address.py::TestEnUS::test_city_prefix", "tests/providers/test_address.py::TestEnUS::test_military_apo", "tests/providers/test_address.py::TestEnUS::test_military_dpo", "tests/providers/test_address.py::TestEnUS::test_military_ship", "tests/providers/test_address.py::TestEnUS::test_military_state", "tests/providers/test_address.py::TestEnUS::test_postcode", "tests/providers/test_address.py::TestEnUS::test_state", "tests/providers/test_address.py::TestEnUS::test_state_abbr", "tests/providers/test_address.py::TestEnUS::test_zipcode", "tests/providers/test_address.py::TestEnUS::test_zipcode_plus4", "tests/providers/test_address.py::TestHuHU::test_address", "tests/providers/test_address.py::TestHuHU::test_postcode_first_digit", "tests/providers/test_address.py::TestHuHU::test_street_address", "tests/providers/test_address.py::TestHuHU::test_street_address_with_county", "tests/providers/test_address.py::TestNeNP::test_address", "tests/providers/test_address.py::TestNoNO::test_address", "tests/providers/test_address.py::TestNoNO::test_city_suffix", "tests/providers/test_address.py::TestNoNO::test_postcode", "tests/providers/test_address.py::TestNoNO::test_street_suffix", "tests/providers/test_address.py::TestZhTW::test_address", "tests/providers/test_address.py::TestZhCN::test_address", "tests/providers/test_address.py::TestPtBr::test_address", 
"tests/providers/test_address.py::TestPtPT::test_distrito", "tests/providers/test_address.py::TestPtPT::test_freguesia" ]
[]
MIT License
2,847
[ "faker/providers/address/ja_JP/__init__.py", "faker/providers/address/ko_KR/__init__.py", "faker/providers/address/en_CA/__init__.py", "faker/providers/address/en_US/__init__.py" ]
[ "faker/providers/address/ja_JP/__init__.py", "faker/providers/address/ko_KR/__init__.py", "faker/providers/address/en_CA/__init__.py", "faker/providers/address/en_US/__init__.py" ]
dask__dask-3833
b341ac841234cb06c170c7af0fc65b2827be2cef
2018-07-31 22:43:11
b8816eb498bfe4a24ace89484b2df2af3d181bfe
TomAugspurger: Does this also close https://github.com/dask/dask/issues/3660?
diff --git a/dask/bytes/core.py b/dask/bytes/core.py index 73b0eae33..aa395eead 100644 --- a/dask/bytes/core.py +++ b/dask/bytes/core.py @@ -263,6 +263,41 @@ def infer_options(urlpath): return urlpath, protocol, options +def expand_paths_if_needed(paths, mode, num, fs, name_function): + """Expand paths if they have a ``*`` in them. + + :param paths: list of paths + mode : str + Mode in which to open files. + num : int + If opening in writing mode, number of files we expect to create. + fs : filesystem object + name_function : callable + If opening in writing mode, this callable is used to generate path + names. Names are generated for each partition by + ``urlpath.replace('*', name_function(partition_index))``. + :return: list of paths + """ + expanded_paths = [] + paths = list(paths) + if 'w' in mode and sum([1 for p in paths if '*' in p]) > 1: + raise ValueError("When writing data, only one filename mask can be specified.") + for curr_path in paths: + if '*' in curr_path: + if 'w' in mode: + # expand using name_function + expanded_paths.extend(_expand_paths(curr_path, name_function, num)) + else: + # expand using glob + expanded_paths.extend(fs.glob(curr_path)) + else: + expanded_paths.append(curr_path) + # if we generated more paths that asked for, trim the list + if 'w' in mode and len(expanded_paths) > num: + expanded_paths = expanded_paths[:num] + return expanded_paths + + def get_fs_token_paths(urlpath, mode='rb', num=1, name_function=None, storage_options=None): """Filesystem, deterministic token, and paths from a urlpath and options. 
@@ -294,9 +329,8 @@ def get_fs_token_paths(urlpath, mode='rb', num=1, name_function=None, raise ValueError("When specifying a list of paths, all paths must " "share the same protocol and options") update_storage_options(options, storage_options) - paths = list(paths) - fs, fs_token = get_fs(protocol, options) + paths = expand_paths_if_needed(paths, mode, num, fs, name_function) elif isinstance(urlpath, (str, unicode)) or hasattr(urlpath, 'name'): urlpath, protocol, options = infer_options(urlpath) diff --git a/dask/dataframe/tseries/resample.py b/dask/dataframe/tseries/resample.py index 3f5b8f762..de3e15824 100644 --- a/dask/dataframe/tseries/resample.py +++ b/dask/dataframe/tseries/resample.py @@ -38,7 +38,8 @@ def _resample_series(series, start, end, reindex_closed, rule, resample_kwargs, how, fill_value, how_args, how_kwargs): out = getattr(series.resample(rule, **resample_kwargs), how)(*how_args, **how_kwargs) return out.reindex(pd.date_range(start, end, freq=rule, - closed=reindex_closed), + closed=reindex_closed, + name=out.index.name), fill_value=fill_value)
Resample Dask Dataframe results in loss of index column name I created this self containable example to demonstrate a bug I found when resampling a Dask Dataframe. As can be seen below, after resampling the Pandas Dataframe it still contains the name of the index, but after resampling the Dask Dataframe it loses the name of the index. It's interesting to note that when doing `df2.index.name` it shows the correct index name, but when doing `df2.head()` this doesn't happen. <details> <summary>Code</summary> ``` import numpy as np import pandas as pd from numpy import sqrt import matplotlib.pyplot as plt import dask import dask.dataframe as dd from datetime import datetime, timedelta # Create random time series date_today = datetime.now() days = pd.date_range(date_today, date_today + timedelta(20), freq='D') np.random.seed(seed=1111) data = np.random.randint(1, high=100, size=len(days)) # Create Pandas Dataframe df = pd.DataFrame({'date': days, 'values': data}) df = df.set_index('date') print(df.head()) print(df.resample('D').mean().head()) # Create Dask Dataframe df2 = dd.from_pandas(df, npartitions=4) print(df2.head()) print(df2.resample('D').mean().head()) df2.index.name ``` </details> <details> <summary>Output</summary> ``` values date 2018-07-30 16:10:31.049569 29 2018-07-31 16:10:31.049569 56 2018-08-01 16:10:31.049569 82 2018-08-02 16:10:31.049569 13 2018-08-03 16:10:31.049569 35 values date 2018-07-30 29 2018-07-31 56 2018-08-01 82 2018-08-02 13 2018-08-03 35 values date 2018-07-30 16:10:31.049569 29 2018-07-31 16:10:31.049569 56 2018-08-01 16:10:31.049569 82 2018-08-02 16:10:31.049569 13 2018-08-03 16:10:31.049569 35 values 2018-07-30 29 2018-07-31 56 2018-08-01 82 2018-08-02 13 2018-08-03 35 'date' ``` </details>
dask/dask
diff --git a/dask/bytes/tests/test_local.py b/dask/bytes/tests/test_local.py index dbe58ae0d..8f723a624 100644 --- a/dask/bytes/tests/test_local.py +++ b/dask/bytes/tests/test_local.py @@ -29,6 +29,12 @@ files = {'.test.accounts.1.json': (b'{"amount": 100, "name": "Alice"}\n' b'{"amount": 800, "name": "Dennis"}\n')} +csv_files = {'.test.fakedata.1.csv': (b'a,b\n' + b'1,2\n'), + '.test.fakedata.2.csv': (b'a,b\n' + b'3,4\n')} + + try: # used only in test_with_urls - may be more generally useful import pathlib @@ -86,6 +92,27 @@ def test_urlpath_inference_errors(): 'should/not/be.csv' 'allowed.csv'}) +def test_urlpath_expand_read(): + """Make sure * is expanded in file paths when reading.""" + # when reading, globs should be expanded to read files by mask + with filetexts(csv_files, mode='b'): + _, _, paths = get_fs_token_paths('.*.csv') + assert len(paths) == 2 + _, _, paths = get_fs_token_paths(['.*.csv']) + assert len(paths) == 2 + + +def test_urlpath_expand_write(): + """Make sure * is expanded in file paths when writing.""" + _, _, paths = get_fs_token_paths('prefix-*.csv', mode='wb', num=2) + assert paths == ['prefix-0.csv', 'prefix-1.csv'] + _, _, paths = get_fs_token_paths(['prefix-*.csv'], mode='wb', num=2) + assert paths == ['prefix-0.csv', 'prefix-1.csv'] + # we can read with multiple masks, but not write + with pytest.raises(ValueError): + _, _, paths = get_fs_token_paths(['prefix1-*.csv', 'prefix2-*.csv'], mode='wb', num=2) + + def test_read_bytes(): with filetexts(files, mode='b'): sample, values = read_bytes('.test.accounts.*') diff --git a/dask/dataframe/tseries/tests/test_resample.py b/dask/dataframe/tseries/tests/test_resample.py index 7c3b4f7bf..2b20f3e30 100644 --- a/dask/dataframe/tseries/tests/test_resample.py +++ b/dask/dataframe/tseries/tests/test_resample.py @@ -77,3 +77,19 @@ def test_unknown_divisions_error(): assert False except ValueError as e: assert 'divisions' in str(e) + + +def test_resample_index_name(): + import numpy as np + from 
datetime import datetime, timedelta + + date_today = datetime.now() + days = pd.date_range(date_today, date_today + timedelta(20), freq='D') + data = np.random.randint(1, high=100, size=len(days)) + + df = pd.DataFrame({'date': days, 'values': data}) + df = df.set_index('date') + + ddf = dd.from_pandas(df, npartitions=4) + + assert ddf.resample('D').mean().head().index.name == "date"
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.18
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[complete]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 click==8.0.4 cloudpickle==2.2.1 -e git+https://github.com/dask/dask.git@b341ac841234cb06c170c7af0fc65b2827be2cef#egg=dask distributed==1.28.1 HeapDict==1.0.1 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work locket==1.0.0 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work msgpack==1.0.5 numpy==1.19.5 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pandas==1.1.5 partd==1.2.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work psutil==7.0.0 py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 python-dateutil==2.9.0.post0 pytz==2025.2 PyYAML==6.0.1 six==1.17.0 sortedcontainers==2.4.0 tblib==1.7.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work toolz==0.12.0 tornado==6.1 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zict==2.1.0 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: dask channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - click==8.0.4 - cloudpickle==2.2.1 - distributed==1.28.1 - heapdict==1.0.1 - locket==1.0.0 - msgpack==1.0.5 - numpy==1.19.5 - pandas==1.1.5 - partd==1.2.0 - psutil==7.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyyaml==6.0.1 - six==1.17.0 - sortedcontainers==2.4.0 - tblib==1.7.0 - toolz==0.12.0 - tornado==6.1 - zict==2.1.0 prefix: /opt/conda/envs/dask
[ "dask/bytes/tests/test_local.py::test_urlpath_expand_read", "dask/bytes/tests/test_local.py::test_urlpath_expand_write" ]
[ "dask/bytes/tests/test_local.py::test_not_found", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-M-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-M-left-right]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-2-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-count-5-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-2-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-mean-5-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-2-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[series-ohlc-5-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-2-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-count-5-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-2-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-mean-5-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-2-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-30T-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-30T-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-30T-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-30T-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-h-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-h-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-h-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-h-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-d-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-d-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-d-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-d-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-w-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-w-right-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-w-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-w-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-M-right-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-M-right-left]", 
"dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-M-left-right]", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample[frame-ohlc-5-M-left-left]", "dask/dataframe/tseries/tests/test_resample.py::test_resample_agg", "dask/dataframe/tseries/tests/test_resample.py::test_resample_agg_passes_kwargs", "dask/dataframe/tseries/tests/test_resample.py::test_resample_index_name" ]
[ "dask/bytes/tests/test_local.py::test_urlpath_inference_strips_protocol", "dask/bytes/tests/test_local.py::test_urlpath_inference_errors", "dask/bytes/tests/test_local.py::test_read_bytes", "dask/bytes/tests/test_local.py::test_read_bytes_sample_delimiter", "dask/bytes/tests/test_local.py::test_read_bytes_blocksize_none", "dask/bytes/tests/test_local.py::test_read_bytes_blocksize_float", "dask/bytes/tests/test_local.py::test_with_urls", "dask/bytes/tests/test_local.py::test_with_paths", "dask/bytes/tests/test_local.py::test_read_bytes_block", "dask/bytes/tests/test_local.py::test_read_bytes_delimited", "dask/bytes/tests/test_local.py::test_compression[gzip-None]", "dask/bytes/tests/test_local.py::test_compression[None-None]", "dask/bytes/tests/test_local.py::test_compression[xz-None]", "dask/bytes/tests/test_local.py::test_compression[bz2-None]", "dask/bytes/tests/test_local.py::test_compression[None-10]", "dask/bytes/tests/test_local.py::test_open_files", "dask/bytes/tests/test_local.py::test_open_files_text_mode[utf-8]", "dask/bytes/tests/test_local.py::test_open_files_text_mode[ascii]", "dask/bytes/tests/test_local.py::test_open_files_compression[gzip-rt]", "dask/bytes/tests/test_local.py::test_open_files_compression[gzip-rb]", "dask/bytes/tests/test_local.py::test_open_files_compression[None-rt]", "dask/bytes/tests/test_local.py::test_open_files_compression[None-rb]", "dask/bytes/tests/test_local.py::test_open_files_compression[xz-rt]", "dask/bytes/tests/test_local.py::test_open_files_compression[xz-rb]", "dask/bytes/tests/test_local.py::test_open_files_compression[bz2-rt]", "dask/bytes/tests/test_local.py::test_open_files_compression[bz2-rb]", "dask/bytes/tests/test_local.py::test_getsize[None]", "dask/bytes/tests/test_local.py::test_bad_compression", "dask/bytes/tests/test_local.py::test_open_files_write[compression_opener0]", "dask/bytes/tests/test_local.py::test_open_files_write[compression_opener1]", 
"dask/bytes/tests/test_local.py::test_pickability_of_lazy_files", "dask/bytes/tests/test_local.py::test_py2_local_bytes", "dask/bytes/tests/test_local.py::test_abs_paths", "dask/dataframe/tseries/tests/test_resample.py::test_series_resample_not_implemented", "dask/dataframe/tseries/tests/test_resample.py::test_unknown_divisions_error" ]
[]
BSD 3-Clause "New" or "Revised" License
2,849
[ "dask/bytes/core.py", "dask/dataframe/tseries/resample.py" ]
[ "dask/bytes/core.py", "dask/dataframe/tseries/resample.py" ]
sendgrid__sendgrid-python-593
1ac58c6fc491554bc24573393ae6242e579dbabb
2018-08-01 17:31:20
56a0ad09e7343e0c95321931bea0745927ad61ed
diff --git a/examples/helpers/mail/mail_example.py b/examples/helpers/mail_example.py similarity index 91% rename from examples/helpers/mail/mail_example.py rename to examples/helpers/mail_example.py index b2de7f0..0a5b868 100644 --- a/examples/helpers/mail/mail_example.py +++ b/examples/helpers/mail_example.py @@ -1,8 +1,6 @@ -import json -import os -import urllib2 +from sendgrid import SendGridAPIClient from sendgrid.helpers.mail import * -from sendgrid import * + # NOTE: you will need move this file to the root # directory of this project to execute properly. @@ -217,3 +215,27 @@ send_hello_email() # this will only send an email if you set SandBox Mode to False send_kitchen_sink() + + +def dynamic_template_usage(): + """ + Sample usage of dynamic (handlebars) transactional templates. + To make this work, you should have dynamic template created within your + SendGrid account. For this particular example, template may be like:: + + <p>Hello, {{name}}! Your current balance is {{balance}}<p> + + """ + mail = Mail() + mail.from_email = '[email protected]' + mail.template_id = 'd-your-dynamic-template-uid' + p = Personalization() + p.add_to(Email('[email protected]')) + p.dynamic_template_data = { + 'name': 'Bob', + 'balance': 42 + } + mail.add_personalization(p) + + sg = SendGridAPIClient(apikey='SG.your-api-key') + sg.client.mail.send.post(request_body=mail.get()) diff --git a/sendgrid/helpers/mail/personalization.py b/sendgrid/helpers/mail/personalization.py index 8bb4bed..8032af9 100644 --- a/sendgrid/helpers/mail/personalization.py +++ b/sendgrid/helpers/mail/personalization.py @@ -1,6 +1,10 @@ class Personalization(object): """A Personalization defines who should receive an individual message and how that message should be handled. + + :var dynamic_template_data: data for dynamic transactional template. + Should be JSON-serializeable structure. No pre-processing sill be done + prior to sending this via http client. 
""" def __init__(self): @@ -13,6 +17,7 @@ class Personalization(object): self._substitutions = [] self._custom_args = [] self._send_at = None + self.dynamic_template_data = None @property def tos(self): @@ -198,4 +203,8 @@ class Personalization(object): if self.send_at is not None: personalization["send_at"] = self.send_at + + if self.dynamic_template_data is not None: + personalization['dynamic_template_data'] = self.dynamic_template_data + return personalization diff --git a/use_cases/transational_templates.md b/use_cases/transational_templates.md index d3e3a00..2d74f92 100644 --- a/use_cases/transational_templates.md +++ b/use_cases/transational_templates.md @@ -66,6 +66,38 @@ print(response.body) print(response.headers) ``` +### With dynamic templates + +Sendgrid dynamic templates let you leverage power of [handlebars](https://handlebarsjs.com/) +syntax to easily manage complex dynamic content in transactional emails. + +To check this example snippet, create +[transactional email template](https://sendgrid.com/dynamic_templates) with code like +```html +<p>Hello, {{name}}! 
Your current balance is {{balance}}<p> +``` + +Than send email based on it, providing context for substitutions: +```python +from sendgrid import SendGridAPIClient +from sendgrid.helpers.mail import Email, Personalization + + +sg = SendGridAPIClient(apikey='SG.your-api-key') + +mail = Mail() +mail.from_email='[email protected]' +mail.template_id = 'd-your-dynamic-template-uid' +p = Personalization() +p.add_to(Email('[email protected]')) +p.dynamic_template_data = {'name': 'Bob', 'balance': 42} +mail.add_personalization(p) + +sg.client.mail.send.post(request_body=mail.get()) +``` + +Read more about dynamic templates in [docs](https://sendgrid.com/docs/User_Guide/Transactional_Templates/how_to_send_an_email_with_transactional_templates.html) + ## Without Mail Helper Class ```python @@ -113,4 +145,4 @@ except urllib.HTTPError as e: print(response.status_code) print(response.body) print(response.headers) -``` \ No newline at end of file +```
Add Dynamic Template Support #### Issue Summary On 7/24/2018, our team publicly launched dynamic content for transactional templates. It is now available for all customers sending over v3 of our Mail Send API. Iterate over lists, handle conditionals and more, thanks to native support for a subset of Handlebars syntax! More information can be found in our [blog post announcement](https://sendgrid.com/blog/how-to-use-sendgrids-dynamic-templates-for-your-transactional-emails/). You can currently use this feature by manually creating the request body as [shown here](https://github.com/sendgrid/sendgrid-python/blob/master/USAGE.md#v3-mail-send). Now, we need to create helper code and examples for this SDK. #### Acceptance Criteria * [Implement a helper similar to what we have for the legacy templates](https://github.com/sendgrid/sendgrid-python/blob/master/use_cases/transational_templates.md) * Update the [transactional_templates.md](https://github.com/sendgrid/sendgrid-python/blob/master/use_cases/transational_templates.md) example to demonstrate the new *Dynamic Templates* using the helper and re-name the current example to *Legacy* #### Documentation - [Reference Implementation](https://github.com/sendgrid/sendgrid-nodejs/pull/691#issuecomment-407490342) - [How to Send an Email With Dynamic Transactional Templates](https://sendgrid.com/docs/User_Guide/Transactional_Templates/how_to_send_an_email_with_transactional_templates.html) - [Using Handlebars](https://sendgrid.com/docs/User_Guide/Transactional_Templates/Using_handlebars.html) - [Interactive API Docs](https://dynamic-templates.api-docs.io/3.0/mail-send-with-dynamic-transactional-templates/v3-mail-send)
sendgrid/sendgrid-python
diff --git a/test/test_mail.py b/test/test_mail.py index 7721b52..08d0feb 100644 --- a/test/test_mail.py +++ b/test/test_mail.py @@ -80,7 +80,6 @@ class UnitTests(unittest.TestCase): else: self.fail("Should have failed as SendGrid API key included") - def test_helloEmail(self): self.max_diff = None @@ -130,7 +129,7 @@ class UnitTests(unittest.TestCase): personalization = Personalization() personalization.add_to(Email("[email protected]")) mail.add_personalization(personalization) - + mail.add_content(Content("text/html", "<html><body>some text here</body></html>")) mail.add_content(Content("text/plain", "some text here")) @@ -562,3 +561,26 @@ class UnitTests(unittest.TestCase): def test_directly_setting_substitutions(self): personalization = Personalization() personalization.substitutions = [{'a': 0}] + + def test_dynamic_template_data(self): + p = Personalization() + p.add_to(Email('[email protected]')) + p.dynamic_template_data = { + 'customer': { + 'name': 'Bob', + 'returning': True + }, + 'total': 42 + } + + expected = { + 'to': [{'email': '[email protected]'}], + 'dynamic_template_data': { + 'customer': { + 'name': 'Bob', + 'returning': True + }, + 'total': 42 + } + } + self.assertDictEqual(p.get(), expected)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 3 }
5.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 dataclasses==0.8 Flask==0.10.1 importlib-metadata==4.8.3 iniconfig==1.1.1 itsdangerous==2.0.1 Jinja2==3.0.3 MarkupSafe==2.0.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 python-http-client==3.3.7 PyYAML==3.11 -e git+https://github.com/sendgrid/sendgrid-python.git@1ac58c6fc491554bc24573393ae6242e579dbabb#egg=sendgrid six==1.10.0 tomli==1.2.3 typing_extensions==4.1.1 Werkzeug==2.0.3 zipp==3.6.0
name: sendgrid-python channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - dataclasses==0.8 - flask==0.10.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - itsdangerous==2.0.1 - jinja2==3.0.3 - markupsafe==2.0.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-http-client==3.3.7 - pyyaml==3.11 - six==1.10.0 - tomli==1.2.3 - typing-extensions==4.1.1 - werkzeug==2.0.3 - zipp==3.6.0 prefix: /opt/conda/envs/sendgrid-python
[ "test/test_mail.py::UnitTests::test_dynamic_template_data" ]
[]
[ "test/test_mail.py::UnitTests::test_asm_display_group_limit", "test/test_mail.py::UnitTests::test_directly_setting_substitutions", "test/test_mail.py::UnitTests::test_disable_tracking", "test/test_mail.py::UnitTests::test_helloEmail", "test/test_mail.py::UnitTests::test_helloEmailAdditionalContent", "test/test_mail.py::UnitTests::test_kitchenSink", "test/test_mail.py::UnitTests::test_sendgridAPIKey", "test/test_mail.py::UnitTests::test_unicode_values_in_substitutions_helper" ]
[]
MIT License
2,852
[ "use_cases/transational_templates.md", "sendgrid/helpers/mail/personalization.py", "examples/helpers/mail/mail_example.py" ]
[ "use_cases/transational_templates.md", "sendgrid/helpers/mail/personalization.py", "examples/helpers/mail_example.py" ]
zopefoundation__persistent-83
aa6048a342d91656f3d1be6ff04bd1815e191a04
2018-08-01 20:06:48
85ab57959e9dfd265bec9b77f38776e961164661
jamadden: Another option, instead of having the C code round each time it retrieves `TimeStamp_sec`, would be to have both versions do the rounding before creating the raw value. That loses precision in the raw value, though, potentially leading to more collisions. jamadden: Argh. Apparently MSVC 9 doesn’t have the standard `round` function, at least not by default. mgedmin: > I see you already have a unit test for this case (6.5), and I wonder why it passes? That's because I'm an idiot who forgot that it's not `round(...)` that applies here but `round(..., 6)`. jamadden: > I think that C and Python versions should be consistent between themselves, but I don't think round(..., 6) is appropriate. That's essentially what happens in the raw bytes format, and why a trip through `raw()` and back previously appeared to lose that precision in the Python version. The C version is always derived from the raw version, which is why it showed that loss immediately. I did have an implementation of the Python version that was basically `self._elements = parseRaw(makeRaw(*elements))` but that seemed needlessly expensive, though that may be premature optimization. Would you prefer that? Or can you suggest a different approach? mgedmin: > That's essentially what happens in the raw bytes format Ah, does the raw bytes version store an integer number of microseconds? I should've refreshed my memory before attempting to review code. > self._elements = parseRaw(makeRaw(*elements)) I do actually prefer that (with a comment explaining that this is done to discard extra precision that wouldn't be preserved in the raw format). (TBH the thing I would prefer _most_ is for somebody smarter than me to review this PR.) jamadden: > It does, yes. 
Python: ```python def _makeRaw(year, month, day, hour, minute, second): a = (((year - 1900) * 12 + month - 1) * 31 + day - 1) a = (a * 24 + hour) * 60 + minute b = int(second / _SCONV) # Don't round() this; the C version does simple truncation return struct.pack('>II', a, b) ``` C: ```C PyObject * TimeStamp_FromDate(int year, int month, int day, int hour, int min, double sec) { TimeStamp *ts = NULL; //... /// ts->data is unsigned char[] sec /= SCONV; v = (unsigned int)sec; ts->data[4] = v / 16777216; ts->data[5] = (v % 16777216) / 65536; ts->data[6] = (v % 65536) / 256; ts->data[7] = v % 256; //... } ``` > I do actually prefer that (with a comment explaining that this is done to discard extra precision that wouldn't be preserved in the raw format). I can go back to that in the constructor. That's slightly closer parity with C. But I think I would still need a `round()` call in the `seconds()` accessor: [the C version needed that](https://github.com/zopefoundation/persistent/pull/83/files#diff-6a6e068aef7fec9e0c363cbce457b2a9R212). The problem, I think, is that the storage format is lossy, and without the `round()` sometimes the results don't make sense (they're not clearly derived from the input). jamadden: > But I think I would still need a round() call in the seconds() accessor: the C version needed that. No, the C version needed that precisely because the constructor doesn't round-trip through raw(). FWIW, this is a place where the compiler flag `-ffast-math` breaks things. The tests weren't passing for me this morning (specifically, 6.5555 was producing 6.555499999999999 in the C version), and it turned out to be because I compiled with `-ffast-math`. jamadden: >> But I think I would still need a round() call in the seconds() accessor: the C version needed that. > No, the C version needed that precisely because the constructor doesn't round-trip through raw(). Once again, I am wrong. 
The C version always constructs the double value from its raw bytes, which don't change across trips through raw(): ```C unsigned int v; double sec; v = (self->data[4] * 16777216 + self->data[5] * 65536 + self->data[6] * 256 + self->data[7]); sec = SCONV * (double)v; ``` The Python version explicitly calls `round()` when it parses raw bytes, so that's why the C version needs to do so as well. They either both need to or both shouldn't. If they both *do not* call `round` at all, then there is a single test failure in the Python implementation: `TimeStamp(2011, 2, 16, 14, 37, 22.80544).timeTime()` produces 1297867042.805439 instead of 1297867042.80544. That can be fixed with a change to `_makeUTC`. jamadden: ping :)
diff --git a/CHANGES.rst b/CHANGES.rst index 956c0a5..469c538 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,12 +4,6 @@ 4.3.1 (unreleased) ------------------ -- Change the default representation of ``Persistent`` objects to - include the representation of their OID and jar, if set. Also add - the ability for subclasses to implement ``_p_repr()`` instead of - overriding ``__repr__`` for better exception handling. See `issue 11 - <https://github.com/zopefoundation/persistent/issues/11>`_. - - Reach and maintain 100% test coverage. - Simplify ``__init__.py``, including removal of an attempted legacy @@ -37,6 +31,15 @@ - Remove some internal compatibility shims that are no longer necessary. See `PR 82 <https://github.com/zopefoundation/persistent/pull/82>`_. +- Make the return value of ``TimeStamp.second()`` consistent across C + and Python implementations when the ``TimeStamp`` was created from 6 + arguments with floating point seconds. Also make it match across + trips through ``TimeStamp.raw()``. Previously, the C version could + initially have erroneous rounding and too much false precision, + while the Python version could have too much precision. The raw/repr + values have not changed. See `issue 41 + <https://github.com/zopefoundation/persistent/issues/41>`_. + 4.3.0 (2018-07-30) ------------------ diff --git a/docs/using.rst b/docs/using.rst index 417b25e..5803d7e 100644 --- a/docs/using.rst +++ b/docs/using.rst @@ -1,10 +1,9 @@ -============================================= - Using :mod:`persistent` in your application -============================================= +Using :mod:`persistent` in your application +=========================================== Inheriting from :class:`persistent.Persistent` -============================================== +---------------------------------------------- The basic mechanism for making your application's objects persistent is mix-in interitance. 
Instances whose classes derive from @@ -15,7 +14,7 @@ they have been changed. Relationship to a Data Manager and its Cache -============================================ +-------------------------------------------- Except immediately after their creation, persistent objects are normally associated with a :term:`data manager` (also referred to as a :term:`jar`). @@ -64,7 +63,7 @@ The examples below use a stub data manager class, and its stub cache class: Persistent objects without a Data Manager -========================================= +----------------------------------------- Before aersistent instance has been associtated with a a data manager ( i.e., its ``_p_jar`` is still ``None``). @@ -167,7 +166,7 @@ Try all sorts of different ways to change the object's state: Associating an Object with a Data Manager -========================================= +----------------------------------------- Once associated with a data manager, a persistent object's behavior changes: @@ -220,7 +219,7 @@ control the state as described below, or use a :class:`~.PersistentList` or :class:`~.PersistentMapping`. Explicitly controlling ``_p_state`` -=================================== +----------------------------------- Persistent objects expose three methods for moving an object into and out of the "ghost" state:: :meth:`persistent.Persistent._p_activate`, @@ -329,7 +328,7 @@ which is exactly the same as calling ``_p_activate``: The pickling protocol -===================== +--------------------- Because persistent objects need to control how they are pickled and unpickled, the :class:`persistent.Persistent` base class overrides @@ -383,7 +382,7 @@ The ``_p_serial`` attribute is not affected by calling setstate. Estimated Object Size -===================== +--------------------- We can store a size estimation in ``_p_estimated_size``. Its default is 0. 
The size estimation can be used by a cache associated with the data manager @@ -413,7 +412,7 @@ Of course, the estimated size must not be negative. Overriding the attribute protocol -================================= +--------------------------------- Subclasses which override the attribute-management methods provided by :class:`persistent.Persistent`, but must obey some constraints: @@ -449,24 +448,3 @@ Subclasses which override the attribute-management methods provided by :meth:`__getattr__` For the ``__getattr__`` method, the behavior is like that for regular Python classes and for earlier versions of ZODB 3. - - -Implementing ``_p_repr`` -======================== - -Subclasses can implement ``_p_repr`` to provide a custom -representation. If this method raises an exception, the default -representation will be used. The benefit of implementing ``_p_repr`` -instead of overriding ``__repr__`` is that it provides safer handling -for objects that can't be activated because their persistent data is -missing or their jar is closed. - -.. doctest:: - - >>> class P(Persistent): - ... def _p_repr(self): - ... return "Custom repr" - - >>> p = P() - >>> print(repr(p)) - Custom repr diff --git a/persistent/cPersistence.c b/persistent/cPersistence.c index 9e0f3ae..131dd71 100644 --- a/persistent/cPersistence.c +++ b/persistent/cPersistence.c @@ -1372,101 +1372,6 @@ Per_set_sticky(cPersistentObject *self, PyObject* value) return 0; } -static PyObject* -repr_format_exception(char* format) -{ - /* If an exception we should catch occurred, return a new - string of its repr. Otherwise, return NULL. 
*/ - PyObject *exc_t; - PyObject *exc_v; - PyObject *exc_tb; - PyObject *result = NULL; - - if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_Exception)) - { - PyErr_Fetch(&exc_t, &exc_v, &exc_tb); - PyErr_NormalizeException(&exc_t, &exc_v, &exc_tb); - PyErr_Clear(); - - result = PyUnicode_FromFormat(format, exc_v); - Py_DECREF(exc_t); - Py_DECREF(exc_v); - Py_DECREF(exc_tb); - } - return result; -} - -static PyObject* -repr_helper(PyObject *o, char* format) -{ - /* Returns a new reference, or NULL on error */ - PyObject *result; - - if (o) - { - result = PyUnicode_FromFormat(format, o); - if (!result) - result = repr_format_exception(format); - } - else - { - result = PyUnicode_FromString(""); - } - - return result; - -} - -static PyObject* -Per_repr(cPersistentObject *self) -{ - PyObject *prepr = NULL; - PyObject *prepr_exc_str = NULL; - - PyObject *oid_str = NULL; - PyObject *jar_str = NULL; - PyObject *result = NULL; - - prepr = PyObject_GetAttrString((PyObject*)Py_TYPE(self), "_p_repr"); - if (prepr) - { - result = PyObject_CallFunctionObjArgs(prepr, self, NULL); - if (result) - goto cleanup; - else - { - prepr_exc_str = repr_format_exception(" _p_repr %R"); - if (!prepr_exc_str) - goto cleanup; - } - } - else - { - PyErr_Clear(); - prepr_exc_str = PyUnicode_FromString(""); - } - - oid_str = repr_helper(self->oid, " oid %R"); - if (!oid_str) - goto cleanup; - - jar_str = repr_helper(self->jar, " in %R"); - if (!jar_str) - goto cleanup; - - result = PyUnicode_FromFormat("<%s object at %p%S%S%S>", - Py_TYPE(self)->tp_name, self, - oid_str, jar_str, prepr_exc_str); - -cleanup: - Py_XDECREF(prepr); - Py_XDECREF(prepr_exc_str); - Py_XDECREF(oid_str); - Py_XDECREF(jar_str); - - return result; -} - static PyGetSetDef Per_getsets[] = { {"_p_changed", (getter)Per_get_changed, (setter)Per_set_changed}, {"_p_jar", (getter)Per_get_jar, (setter)Per_set_jar}, @@ -1549,7 +1454,7 @@ static PyTypeObject Pertype = { 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 
- (reprfunc)Per_repr, /* tp_repr */ + 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ diff --git a/persistent/interfaces.py b/persistent/interfaces.py index 61ed594..69b7bad 100644 --- a/persistent/interfaces.py +++ b/persistent/interfaces.py @@ -166,14 +166,6 @@ class IPersistent(Interface): these objects are invalidated, they immediately reload their state from their data manager, and are then in the saved state. - reprs - - By default, persistent objects include the reprs of their - _p_oid and _p_jar, if any, in their repr. If a subclass implements - the optional method ``_p_repr``, it will be called and its results returned - instead of the default repr; if this method raises an exception, that - exception will be caught and its repr included in the default repr. - """ _p_jar = Attribute( @@ -322,10 +314,10 @@ class IPersistent(Interface): def _p_getattr(name): """Test whether the base class must handle the name - + The method unghostifies the object, if necessary. The method records the object access, if necessary. - + This method should be called by subclass __getattribute__ implementations before doing anything else. If the method returns True, then __getattribute__ implementations must delegate @@ -479,7 +471,7 @@ class IPickleCache(Interface): """ Perform an incremental garbage collection sweep. o Reduce number of non-ghosts to 'cache_size', if possible. - + o Ghostify in LRU order. o Skip dirty or sticky objects. @@ -513,7 +505,7 @@ class IPickleCache(Interface): If the object's '_p_jar' is not None, raise. - If 'oid' is already in the cache, raise. + If 'oid' is already in the cache, raise. """ def reify(to_reify): @@ -544,7 +536,7 @@ class IPickleCache(Interface): o Any OID corresponding to a p-class will cause the corresponding p-class to be removed from the cache. - o For all other OIDs, ghostify the corrsponding object and + o For all other OIDs, ghostify the corrsponding object and remove it from the ring. 
""" diff --git a/persistent/persistence.py b/persistent/persistence.py index 8d72ce1..7bf0508 100644 --- a/persistent/persistence.py +++ b/persistent/persistence.py @@ -11,7 +11,7 @@ # FOR A PARTICULAR PURPOSE. # ############################################################################## - +import sys from zope.interface import implementer @@ -20,13 +20,14 @@ from persistent.interfaces import GHOST from persistent.interfaces import UPTODATE from persistent.interfaces import CHANGED from persistent.interfaces import STICKY - +from persistent.interfaces import OID_TYPE from persistent.interfaces import SERIAL_TYPE from persistent.timestamp import TimeStamp from persistent.timestamp import _ZERO from persistent._compat import copy_reg from persistent._compat import intern +from . import ring _INITIAL_SERIAL = _ZERO @@ -557,39 +558,6 @@ class Persistent(object): if cache is not None: return cache.get(oid) is self - def __repr__(self): - p_repr_str = '' - p_repr = getattr(type(self), '_p_repr', None) - if p_repr is not None: - try: - return p_repr(self) - except Exception as e: - p_repr_str = ' _p_repr %r' % (e,) - - oid = _OGA(self, '_Persistent__oid') - jar = _OGA(self, '_Persistent__jar') - - oid_str = '' - jar_str = '' - - if oid is not None: - try: - oid_str = ' oid %r' % (oid,) - except Exception as e: - oid_str = ' oid %r' % (e,) - - if jar is not None: - try: - jar_str = ' in %r' % (jar,) - except Exception as e: - jar_str = ' in %r' % (e,) - - return '<%s.%s object at 0x%x%s%s%s>' % ( - type(self).__module__, type(self).__name__, id(self), - oid_str, jar_str, p_repr_str - ) - - def _estimated_size_in_24_bits(value): if value > 1073741696: return 16777215 diff --git a/persistent/timestamp.py b/persistent/timestamp.py index 5da8535..a031a72 100644 --- a/persistent/timestamp.py +++ b/persistent/timestamp.py @@ -53,6 +53,7 @@ class _UTC(datetime.tzinfo): return dt def _makeUTC(y, mo, d, h, mi, s): + s = round(s, 6) # microsecond precision, to match the C 
implementation usec, sec = math.modf(s) sec = int(sec) usec = int(usec * 1e6) @@ -75,7 +76,7 @@ def _parseRaw(octets): day = a // (60 * 24) % 31 + 1 month = a // (60 * 24 * 31) % 12 + 1 year = a // (60 * 24 * 31 * 12) + 1900 - second = round(b * _SCONV, 6) #microsecond precision + second = b * _SCONV return (year, month, day, hour, minute, second) @@ -83,6 +84,7 @@ class pyTimeStamp(object): __slots__ = ('_raw', '_elements') def __init__(self, *args): + self._elements = None if len(args) == 1: raw = args[0] if not isinstance(raw, _RAWTYPE): @@ -90,14 +92,18 @@ class pyTimeStamp(object): if len(raw) != 8: raise TypeError('Raw must be 8 octets') self._raw = raw - self._elements = _parseRaw(raw) elif len(args) == 6: self._raw = _makeRaw(*args) - self._elements = args + # Note that we don't preserve the incoming arguments in self._elements, + # we derive them from the raw value. This is because the incoming + # seconds value could have more precision than would survive + # in the raw data, so we must be consistent. else: raise TypeError('Pass either a single 8-octet arg ' 'or 5 integers and a float') + self._elements = _parseRaw(self._raw) + def raw(self): return self._raw
pyTimeStamp and TimeStamp have different sub-second precision I don't think this is correct: ``` >>> from persistent.timestamp import TimeStamp, pyTimeStamp >>> ts1 = TimeStamp(2001, 2, 3, 4, 5, 6.123456789) >>> ts2 = pyTimeStamp(2001, 2, 3, 4, 5, 6.123456789) >>> ts1 == ts2 True >>> ts1.second() == ts2.second() False >>> ts1.second() 6.1234567780047655 >>> ts2.second() 6.123456789 ```
zopefoundation/persistent
diff --git a/persistent/tests/test_persistence.py b/persistent/tests/test_persistence.py index 9ded7e2..3ce52e7 100644 --- a/persistent/tests/test_persistence.py +++ b/persistent/tests/test_persistence.py @@ -11,31 +11,24 @@ # FOR A PARTICULAR PURPOSE. # ############################################################################## +import os +import unittest import platform -import re import sys -import unittest -import persistent from persistent._compat import copy_reg +py_impl = getattr(platform, 'python_implementation', lambda: None) +_is_pypy3 = py_impl() == 'PyPy' and sys.version_info[0] > 2 +_is_jython = py_impl() == 'Jython' -_is_pypy3 = platform.python_implementation() == 'PyPy' and sys.version_info[0] > 2 -_is_jython = platform.python_implementation() == 'Jython' - -# pylint:disable=R0904,W0212,E1101 +#pylint: disable=R0904,W0212,E1101 # pylint:disable=attribute-defined-outside-init,too-many-lines -# pylint:disable=blacklisted-name,useless-object-inheritance +# pylint:disable=blacklisted-name # Hundreds of unused jar and OID vars make this useless # pylint:disable=unused-variable -def skipIfNoCExtension(o): - return unittest.skipIf( - persistent._cPersistence is None, - "The C extension is not available")(o) - - class _Persistent_Base(object): # py2/3 compat @@ -1622,7 +1615,7 @@ class _Persistent_Base(object): def __setattr__(self, name, value): raise AssertionError("Should not be called") inst = subclass() - self.assertEqual(object.__getattribute__(inst, '_v_setattr_called'), False) + self.assertEqual(object.__getattribute__(inst,'_v_setattr_called'), False) def test_can_set__p_attrs_if_subclass_denies_setattr(self): # ZODB defines a PersistentBroken subclass that only lets us @@ -1671,7 +1664,7 @@ class _Persistent_Base(object): self.assertEqual(candidate._p_state, UPTODATE) cache.new_ghost(KEY, candidate) - self.assertIs(cache.get(KEY), candidate) + self.assertTrue(cache.get(KEY) is candidate) self.assertEqual(candidate._p_oid, KEY) 
self.assertEqual(candidate._p_state, GHOST) self.assertEqual(candidate.set_by_new, 1) @@ -1692,192 +1685,11 @@ class _Persistent_Base(object): self.assertEqual(candidate._p_state, UPTODATE) cache.new_ghost(KEY, candidate) - self.assertIs(cache.get(KEY), candidate) + self.assertTrue(cache.get(KEY) is candidate) self.assertEqual(candidate._p_oid, KEY) self.assertEqual(candidate._p_state, GHOST) self.assertEqual(candidate.set_by_new, 1) - def _normalize_repr(self, r): - # Pure-python vs C - r = r.replace('persistent.persistence.Persistent', 'persistent.Persistent') - r = r.replace("persistent.tests.test_persistence.", '') - # addresses - r = re.sub(r'0x[0-9a-fA-F]*', '0xdeadbeef', r) - # Python 3.7 removed the trailing , in exception reprs - r = r.replace("',)", "')") - # Python 2 doesn't have a leading b prefix for byte literals - r = r.replace("oid '", "oid b'") - return r - - def _normalized_repr(self, o): - return self._normalize_repr(repr(o)) - - def test_repr_no_oid_no_jar(self): - p = self._makeOne() - result = self._normalized_repr(p) - self.assertEqual(result, '<persistent.Persistent object at 0xdeadbeef>') - - def test_repr_no_oid_in_jar(self): - p = self._makeOne() - - class Jar(object): - def __repr__(self): - return '<SomeJar>' - - p._p_jar = Jar() - - result = self._normalized_repr(p) - self.assertEqual( - result, - "<persistent.Persistent object at 0xdeadbeef in <SomeJar>>") - - def test_repr_oid_no_jar(self): - p = self._makeOne() - p._p_oid = b'12345678' - - result = self._normalized_repr(p) - self.assertEqual( - result, - "<persistent.Persistent object at 0xdeadbeef oid b'12345678'>") - - def test_repr_no_oid_repr_jar_raises_exception(self): - p = self._makeOne() - - class Jar(object): - def __repr__(self): - raise Exception('jar repr failed') - - p._p_jar = Jar() - - result = self._normalized_repr(p) - self.assertEqual( - result, - "<persistent.Persistent object at 0xdeadbeef in Exception('jar repr failed')>") - - - def 
test_repr_oid_raises_exception_no_jar(self): - p = self._makeOne() - - class BadOID(bytes): - def __repr__(self): - raise Exception("oid repr failed") - p._p_oid = BadOID(b'12345678') - - result = self._normalized_repr(p) - self.assertEqual( - result, - "<persistent.Persistent object at 0xdeadbeef oid Exception('oid repr failed')>") - - - def test_repr_oid_and_jar_raise_exception(self): - p = self._makeOne() - - class BadOID(bytes): - def __repr__(self): - raise Exception("oid repr failed") - p._p_oid = BadOID(b'12345678') - - class Jar(object): - def __repr__(self): - raise Exception('jar repr failed') - - p._p_jar = Jar() - - - result = self._normalized_repr(p) - self.assertEqual( - result, - "<persistent.Persistent object at 0xdeadbeef oid Exception('oid repr failed')" - " in Exception('jar repr failed')>") - - def test_repr_no_oid_repr_jar_raises_baseexception(self): - p = self._makeOne() - - class Jar(object): - def __repr__(self): - raise BaseException('jar repr failed') - - p._p_jar = Jar() - with self.assertRaisesRegex(BaseException, 'jar repr failed'): - repr(p) - - def test_repr_oid_raises_baseexception_no_jar(self): - p = self._makeOne() - - class BadOID(bytes): - def __repr__(self): - raise BaseException("oid repr failed") - p._p_oid = BadOID(b'12345678') - - with self.assertRaisesRegex(BaseException, 'oid repr failed'): - repr(p) - - def test_repr_oid_and_jar(self): - p = self._makeOne() - p._p_oid = b'12345678' - - class Jar(object): - def __repr__(self): - return '<SomeJar>' - - p._p_jar = Jar() - - result = self._normalized_repr(p) - self.assertEqual( - result, - "<persistent.Persistent object at 0xdeadbeef oid b'12345678' in <SomeJar>>") - - def test__p_repr(self): - class P(self._getTargetClass()): - def _p_repr(self): - return "Override" - p = P() - self.assertEqual("Override", repr(p)) - - def test__p_repr_exception(self): - class P(self._getTargetClass()): - def _p_repr(self): - raise Exception("_p_repr failed") - p = P() - result = 
self._normalized_repr(p) - self.assertEqual( - result, - "<P object at 0xdeadbeef" - " _p_repr Exception('_p_repr failed')>") - - p._p_oid = b'12345678' - result = self._normalized_repr(p) - self.assertEqual( - result, - "<P object at 0xdeadbeef oid b'12345678'" - " _p_repr Exception('_p_repr failed')>") - - class Jar(object): - def __repr__(self): - return '<SomeJar>' - - p._p_jar = Jar() - result = self._normalized_repr(p) - self.assertEqual( - result, - "<P object at 0xdeadbeef oid b'12345678'" - " in <SomeJar> _p_repr Exception('_p_repr failed')>") - - def test__p_repr_in_instance_ignored(self): - class P(self._getTargetClass()): - pass - p = P() - p._p_repr = lambda: "Instance" - result = self._normalized_repr(p) - self.assertEqual(result, '<P object at 0xdeadbeef>') - - def test__p_repr_baseexception(self): - class P(self._getTargetClass()): - def _p_repr(self): - raise BaseException("_p_repr failed") - p = P() - with self.assertRaisesRegex(BaseException, '_p_repr failed'): - repr(p) class PyPersistentTests(unittest.TestCase, _Persistent_Base): @@ -1989,39 +1801,47 @@ class PyPersistentTests(unittest.TestCase, _Persistent_Base): inst._Persistent__flags = None inst._p_accessed() +_add_to_suite = [PyPersistentTests] -@skipIfNoCExtension -class CPersistentTests(unittest.TestCase, _Persistent_Base): +if not os.environ.get('PURE_PYTHON'): + try: + from persistent import cPersistence + except ImportError: # pragma: no cover + pass + else: + class CPersistentTests(unittest.TestCase, _Persistent_Base): - def _getTargetClass(self): - from persistent.cPersistence import Persistent - return Persistent + def _getTargetClass(self): + from persistent.cPersistence import Persistent + return Persistent - def _checkMRU(self, jar, value): - pass # Figure this out later + def _checkMRU(self, jar, value): + pass # Figure this out later - def _clearMRU(self, jar): - pass # Figure this out later + def _clearMRU(self, jar): + pass # Figure this out later - def _makeCache(self, 
jar): - from persistent.cPickleCache import PickleCache - return PickleCache(jar) + def _makeCache(self, jar): + from persistent.cPickleCache import PickleCache + return PickleCache(jar) + + _add_to_suite.append(CPersistentTests) + class Test_simple_new(unittest.TestCase): -@skipIfNoCExtension -class Test_simple_new(unittest.TestCase): + def _callFUT(self, x): + from persistent.cPersistence import simple_new + return simple_new(x) - def _callFUT(self, x): - from persistent.cPersistence import simple_new - return simple_new(x) + def test_w_non_type(self): + self.assertRaises(TypeError, self._callFUT, '') - def test_w_non_type(self): - self.assertRaises(TypeError, self._callFUT, '') + def test_w_type(self): + TO_CREATE = [type, list, tuple, object, dict] + for typ in TO_CREATE: + self.assertTrue(isinstance(self._callFUT(typ), typ)) - def test_w_type(self): - TO_CREATE = [type, list, tuple, object, dict] - for typ in TO_CREATE: - self.assertTrue(isinstance(self._callFUT(typ), typ)) + _add_to_suite.append(Test_simple_new) def test_suite(): - return unittest.defaultTestLoader.loadTestsFromName(__name__) + return unittest.TestSuite([unittest.makeSuite(x) for x in _add_to_suite]) diff --git a/persistent/tests/test_timestamp.py b/persistent/tests/test_timestamp.py index cc1253c..ff8b6a9 100644 --- a/persistent/tests/test_timestamp.py +++ b/persistent/tests/test_timestamp.py @@ -17,6 +17,8 @@ import sys MAX_32_BITS = 2 ** 31 - 1 MAX_64_BITS = 2 ** 63 - 1 +import persistent.timestamp + class Test__UTC(unittest.TestCase): def _getTargetClass(self): @@ -202,7 +204,8 @@ class TimeStampTests(pyTimeStampTests): from persistent.timestamp import TimeStamp return TimeStamp - [email protected](persistent.timestamp.CTimeStamp is None, + "CTimeStamp not available") class PyAndCComparisonTests(unittest.TestCase): """ Compares C and Python implementations. 
@@ -254,7 +257,6 @@ class PyAndCComparisonTests(unittest.TestCase): def test_equal(self): c, py = self._make_C_and_Py(*self.now_ts_args) - self.assertEqual(c, py) def test_hash_equal(self): @@ -396,22 +398,32 @@ class PyAndCComparisonTests(unittest.TestCase): self.assertTrue(big_c != small_py) self.assertTrue(small_py != big_c) + def test_seconds_precision(self, seconds=6.123456789): + # https://github.com/zopefoundation/persistent/issues/41 + args = (2001, 2, 3, 4, 5, seconds) + c = self._makeC(*args) + py = self._makePy(*args) -def test_suite(): - suite = [ - unittest.makeSuite(Test__UTC), - unittest.makeSuite(pyTimeStampTests), - unittest.makeSuite(TimeStampTests), - ] + self.assertEqual(c, py) + self.assertEqual(c.second(), py.second()) + + py2 = self._makePy(c.raw()) + self.assertEqual(py2, c) + + c2 = self._makeC(c.raw()) + self.assertEqual(c2, c) + + def test_seconds_precision_half(self): + # make sure our rounding matches + self.test_seconds_precision(seconds=6.5) + self.test_seconds_precision(seconds=6.55) + self.test_seconds_precision(seconds=6.555) + self.test_seconds_precision(seconds=6.5555) + self.test_seconds_precision(seconds=6.55555) + self.test_seconds_precision(seconds=6.555555) + self.test_seconds_precision(seconds=6.5555555) + self.test_seconds_precision(seconds=6.55555555) + self.test_seconds_precision(seconds=6.555555555) - try: - from persistent.timestamp import pyTimeStamp - from persistent.timestamp import TimeStamp - except ImportError: # pragma: no cover - pass - else: - if pyTimeStamp != TimeStamp: - # We have both implementations available - suite.append(unittest.makeSuite(PyAndCComparisonTests)) - - return unittest.TestSuite(suite) +def test_suite(): + return unittest.defaultTestLoader.loadTestsFromName(__name__)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 6 }
4.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "cffi", "zope.testrunner", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 exceptiongroup==1.2.2 importlib-metadata==6.7.0 iniconfig==2.0.0 manuel==1.13.0 packaging==24.0 -e git+https://github.com/zopefoundation/persistent.git@aa6048a342d91656f3d1be6ff04bd1815e191a04#egg=persistent pluggy==1.2.0 pycparser==2.21 pytest==7.4.4 tomli==2.0.1 typing_extensions==4.7.1 zipp==3.15.0 zope.exceptions==5.1 zope.interface==6.4.post2 zope.testrunner==6.5
name: persistent channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cffi==1.15.1 - exceptiongroup==1.2.2 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - manuel==1.13.0 - packaging==24.0 - pluggy==1.2.0 - pycparser==2.21 - pytest==7.4.4 - tomli==2.0.1 - typing-extensions==4.7.1 - zipp==3.15.0 - zope-exceptions==5.1 - zope-interface==6.4.post2 - zope-testrunner==6.5 prefix: /opt/conda/envs/persistent
[ "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_seconds_precision", "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_seconds_precision_half" ]
[]
[ "persistent/tests/test_persistence.py::PyPersistentTests::test___delattr___p__names", "persistent/tests/test_persistence.py::PyPersistentTests::test___delattr__normal_name_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test___delattr__normal_name_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test___delattr__normal_name_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test___delattr__normal_name_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test___getattribute___non_cooperative", "persistent/tests/test_persistence.py::PyPersistentTests::test___getattribute___p__names", "persistent/tests/test_persistence.py::PyPersistentTests::test___getattribute__normal_name_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test___getattribute__normal_name_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test___getattribute__normal_name_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test___getattribute__normal_name_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test___getattribute__special_name", "persistent/tests/test_persistence.py::PyPersistentTests::test___getstate__", "persistent/tests/test_persistence.py::PyPersistentTests::test___getstate___derived_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test___getstate___derived_w_slots", "persistent/tests/test_persistence.py::PyPersistentTests::test___getstate___derived_w_slots_in_base_and_derived", "persistent/tests/test_persistence.py::PyPersistentTests::test___getstate___derived_w_slots_in_base_but_not_derived", "persistent/tests/test_persistence.py::PyPersistentTests::test___reduce__", "persistent/tests/test_persistence.py::PyPersistentTests::test___reduce__w_subclass_having_getnewargs", "persistent/tests/test_persistence.py::PyPersistentTests::test___reduce__w_subclass_having_getnewargs_and_getstate", 
"persistent/tests/test_persistence.py::PyPersistentTests::test___reduce__w_subclass_having_getstate", "persistent/tests/test_persistence.py::PyPersistentTests::test___setattr___p__names", "persistent/tests/test_persistence.py::PyPersistentTests::test___setattr___v__name", "persistent/tests/test_persistence.py::PyPersistentTests::test___setattr__normal_name_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test___setattr__normal_name_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test___setattr__normal_name_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test___setattr__normal_name_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___derived_w_slots", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___derived_w_slots_in_base_but_not_derived", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___derived_w_slots_in_base_classes", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___doesnt_fail_on_non_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___doesnt_fail_on_non_string_keys", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___empty", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___interns_dict_keys", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___nonempty", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___nonempty_derived_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test___setstate___nonempty_derived_w_dict_w_two_keys", "persistent/tests/test_persistence.py::PyPersistentTests::test__ancient_dict_layout_bug", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_activate_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_activate_from_saved", 
"persistent/tests/test_persistence.py::PyPersistentTests::test__p_activate_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_activate_leaves_object_in_saved_even_if_object_mutated_self", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_activate_only_sets_state_once", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_activate_w_broken_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_deactivate_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_deactivate_from_changed_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_deactivate_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_deactivate_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_deactivate_from_saved_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_deactivate_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_deactivate_from_unsaved_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_deactivate_when_sticky", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_delattr_w__p__names", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_delattr_w_normal_name", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_getattr_w__p__names", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_getattr_w_normal_name", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_getattr_w_special_names", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_changed_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_changed_w_slots", 
"persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_changed_w_slots_compat", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_saved_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_sticky", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_sticky_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_invalidate_from_unsaved_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_mtime_activates_object", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_mtime_no_serial", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_mtime_w_serial", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_setattr_w__p__name", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_setattr_w_normal_name", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_state_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_state_changed_sticky", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_state_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_state_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_state_saved_sticky", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_state_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_status_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_status_changed_sticky", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_status_ghost", 
"persistent/tests/test_persistence.py::PyPersistentTests::test__p_status_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_status_saved_sticky", "persistent/tests/test_persistence.py::PyPersistentTests::test__p_status_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test_accessed_invalidated_with_jar_and_oid_but_no_cache", "persistent/tests/test_persistence.py::PyPersistentTests::test_accessed_with_jar_and_oid_but_not_in_cache", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_false_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_false_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_false_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_false_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_none_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_none_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_none_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_none_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_none_when_sticky", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_true_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_true_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_true_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_changed_true_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_estimated_size_bigger", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_estimated_size_just_over_threshold", 
"persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_estimated_size_negative", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_estimated_size_small", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_estimated_size_wrong_type", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_jar_not_in_cache_allowed", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_jar_w_new_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_jar_w_valid_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_oid_not_in_cache_allowed", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_oid_w_None_wo_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_oid_w_invalid_oid", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_oid_w_new_oid_w_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_oid_w_new_oid_wo_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_oid_w_valid_oid", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_serial_too_long", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_serial_too_short", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_serial_w_None", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_serial_w_invalid_type", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_serial_w_valid_serial", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_sticky_false_non_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_sticky_false_when_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_sticky_true_non_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_assign_p_sticky_true_when_ghost", 
"persistent/tests/test_persistence.py::PyPersistentTests::test_can_set__p_attrs_if_subclass_denies_setattr", "persistent/tests/test_persistence.py::PyPersistentTests::test_class_conforms_to_IPersistent", "persistent/tests/test_persistence.py::PyPersistentTests::test_ctor", "persistent/tests/test_persistence.py::PyPersistentTests::test_del_jar_like_ZODB_abort", "persistent/tests/test_persistence.py::PyPersistentTests::test_del_jar_no_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_del_jar_of_inactive_object_that_has_no_state", "persistent/tests/test_persistence.py::PyPersistentTests::test_del_jar_while_in_cache", "persistent/tests/test_persistence.py::PyPersistentTests::test_del_oid_like_ZODB_abort", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_changed_from_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_changed_from_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_changed_from_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_changed_from_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_changed_from_unsaved_w_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_changed_when_sticky", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_oid_of_subclass_calling_p_delattr", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_oid_w_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_oid_wo_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_delete_p_serial", "persistent/tests/test_persistence.py::PyPersistentTests::test_instance_conforms_to_IPersistent", "persistent/tests/test_persistence.py::PyPersistentTests::test_new_ghost_success_not_already_ghost_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test_new_ghost_success_not_already_ghost_slot", 
"persistent/tests/test_persistence.py::PyPersistentTests::test_p_accessed_with_jar_with_oid_as_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_p_accessed_with_jar_without_oid", "persistent/tests/test_persistence.py::PyPersistentTests::test_p_activate_with_jar_without_oid", "persistent/tests/test_persistence.py::PyPersistentTests::test_p_invalidate_calls_p_deactivate", "persistent/tests/test_persistence.py::PyPersistentTests::test_p_invalidate_with_slots_broken_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_pickle_roundtrip_simple", "persistent/tests/test_persistence.py::PyPersistentTests::test_pickle_roundtrip_w_getnewargs_and_getstate", "persistent/tests/test_persistence.py::PyPersistentTests::test_pickle_roundtrip_w_slots_and_empty_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test_pickle_roundtrip_w_slots_and_filled_dict", "persistent/tests/test_persistence.py::PyPersistentTests::test_pickle_roundtrip_w_slots_filled_slot", "persistent/tests/test_persistence.py::PyPersistentTests::test_pickle_roundtrip_w_slots_missing_slot", "persistent/tests/test_persistence.py::PyPersistentTests::test_query_p_changed_changed", "persistent/tests/test_persistence.py::PyPersistentTests::test_query_p_changed_ghost", "persistent/tests/test_persistence.py::PyPersistentTests::test_query_p_changed_saved", "persistent/tests/test_persistence.py::PyPersistentTests::test_query_p_changed_unsaved", "persistent/tests/test_persistence.py::PyPersistentTests::test_query_p_estimated_size_del", "persistent/tests/test_persistence.py::PyPersistentTests::test_query_p_estimated_size_new", "persistent/tests/test_persistence.py::PyPersistentTests::test_set__p_changed_w_broken_jar", "persistent/tests/test_persistence.py::PyPersistentTests::test_setattr_in_subclass_is_not_called_creating_an_instance", "persistent/tests/test_persistence.py::PyPersistentTests::test_w_alternate_metaclass", 
"persistent/tests/test_persistence.py::PyPersistentTests::test_w_diamond_inheritance", "persistent/tests/test_persistence.py::CPersistentTests::test___delattr___p__names", "persistent/tests/test_persistence.py::CPersistentTests::test___delattr__normal_name_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test___delattr__normal_name_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test___delattr__normal_name_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test___delattr__normal_name_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test___getattribute___non_cooperative", "persistent/tests/test_persistence.py::CPersistentTests::test___getattribute___p__names", "persistent/tests/test_persistence.py::CPersistentTests::test___getattribute__normal_name_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test___getattribute__normal_name_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test___getattribute__normal_name_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test___getattribute__normal_name_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test___getattribute__special_name", "persistent/tests/test_persistence.py::CPersistentTests::test___getstate__", "persistent/tests/test_persistence.py::CPersistentTests::test___getstate___derived_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test___getstate___derived_w_slots", "persistent/tests/test_persistence.py::CPersistentTests::test___getstate___derived_w_slots_in_base_and_derived", "persistent/tests/test_persistence.py::CPersistentTests::test___getstate___derived_w_slots_in_base_but_not_derived", "persistent/tests/test_persistence.py::CPersistentTests::test___reduce__", "persistent/tests/test_persistence.py::CPersistentTests::test___reduce__w_subclass_having_getnewargs", 
"persistent/tests/test_persistence.py::CPersistentTests::test___reduce__w_subclass_having_getnewargs_and_getstate", "persistent/tests/test_persistence.py::CPersistentTests::test___reduce__w_subclass_having_getstate", "persistent/tests/test_persistence.py::CPersistentTests::test___setattr___p__names", "persistent/tests/test_persistence.py::CPersistentTests::test___setattr___v__name", "persistent/tests/test_persistence.py::CPersistentTests::test___setattr__normal_name_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test___setattr__normal_name_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test___setattr__normal_name_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test___setattr__normal_name_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___derived_w_slots", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___derived_w_slots_in_base_but_not_derived", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___derived_w_slots_in_base_classes", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___doesnt_fail_on_non_dict", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___doesnt_fail_on_non_string_keys", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___empty", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___interns_dict_keys", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___nonempty", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___nonempty_derived_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test___setstate___nonempty_derived_w_dict_w_two_keys", "persistent/tests/test_persistence.py::CPersistentTests::test__ancient_dict_layout_bug", "persistent/tests/test_persistence.py::CPersistentTests::test__p_activate_from_ghost", 
"persistent/tests/test_persistence.py::CPersistentTests::test__p_activate_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_activate_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_activate_leaves_object_in_saved_even_if_object_mutated_self", "persistent/tests/test_persistence.py::CPersistentTests::test__p_activate_only_sets_state_once", "persistent/tests/test_persistence.py::CPersistentTests::test__p_activate_w_broken_jar", "persistent/tests/test_persistence.py::CPersistentTests::test__p_deactivate_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test__p_deactivate_from_changed_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test__p_deactivate_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test__p_deactivate_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_deactivate_from_saved_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test__p_deactivate_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_deactivate_from_unsaved_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test__p_deactivate_when_sticky", "persistent/tests/test_persistence.py::CPersistentTests::test__p_delattr_w__p__names", "persistent/tests/test_persistence.py::CPersistentTests::test__p_delattr_w_normal_name", "persistent/tests/test_persistence.py::CPersistentTests::test__p_getattr_w__p__names", "persistent/tests/test_persistence.py::CPersistentTests::test__p_getattr_w_normal_name", "persistent/tests/test_persistence.py::CPersistentTests::test__p_getattr_w_special_names", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_changed_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_changed_w_slots", 
"persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_changed_w_slots_compat", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_saved_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_sticky", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_sticky_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_invalidate_from_unsaved_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test__p_mtime_activates_object", "persistent/tests/test_persistence.py::CPersistentTests::test__p_mtime_no_serial", "persistent/tests/test_persistence.py::CPersistentTests::test__p_mtime_w_serial", "persistent/tests/test_persistence.py::CPersistentTests::test__p_setattr_w__p__name", "persistent/tests/test_persistence.py::CPersistentTests::test__p_setattr_w_normal_name", "persistent/tests/test_persistence.py::CPersistentTests::test__p_state_changed", "persistent/tests/test_persistence.py::CPersistentTests::test__p_state_changed_sticky", "persistent/tests/test_persistence.py::CPersistentTests::test__p_state_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test__p_state_saved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_state_saved_sticky", "persistent/tests/test_persistence.py::CPersistentTests::test__p_state_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_status_changed", "persistent/tests/test_persistence.py::CPersistentTests::test__p_status_changed_sticky", "persistent/tests/test_persistence.py::CPersistentTests::test__p_status_ghost", 
"persistent/tests/test_persistence.py::CPersistentTests::test__p_status_saved", "persistent/tests/test_persistence.py::CPersistentTests::test__p_status_saved_sticky", "persistent/tests/test_persistence.py::CPersistentTests::test__p_status_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_false_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_false_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_false_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_false_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_none_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_none_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_none_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_none_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_none_when_sticky", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_true_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_true_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_true_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_changed_true_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_estimated_size_bigger", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_estimated_size_just_over_threshold", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_estimated_size_negative", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_estimated_size_small", 
"persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_estimated_size_wrong_type", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_jar_not_in_cache_allowed", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_jar_w_new_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_jar_w_valid_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_oid_not_in_cache_allowed", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_oid_w_None_wo_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_oid_w_invalid_oid", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_oid_w_new_oid_w_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_oid_w_new_oid_wo_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_oid_w_valid_oid", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_serial_too_long", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_serial_too_short", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_serial_w_None", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_serial_w_invalid_type", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_serial_w_valid_serial", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_sticky_false_non_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_sticky_false_when_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_sticky_true_non_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_assign_p_sticky_true_when_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_can_set__p_attrs_if_subclass_denies_setattr", "persistent/tests/test_persistence.py::CPersistentTests::test_class_conforms_to_IPersistent", 
"persistent/tests/test_persistence.py::CPersistentTests::test_ctor", "persistent/tests/test_persistence.py::CPersistentTests::test_del_jar_like_ZODB_abort", "persistent/tests/test_persistence.py::CPersistentTests::test_del_jar_no_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_del_jar_of_inactive_object_that_has_no_state", "persistent/tests/test_persistence.py::CPersistentTests::test_del_jar_while_in_cache", "persistent/tests/test_persistence.py::CPersistentTests::test_del_oid_like_ZODB_abort", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_changed_from_changed", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_changed_from_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_changed_from_saved", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_changed_from_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_changed_from_unsaved_w_dict", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_changed_when_sticky", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_oid_of_subclass_calling_p_delattr", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_oid_w_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_oid_wo_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_delete_p_serial", "persistent/tests/test_persistence.py::CPersistentTests::test_instance_conforms_to_IPersistent", "persistent/tests/test_persistence.py::CPersistentTests::test_new_ghost_success_not_already_ghost_dict", "persistent/tests/test_persistence.py::CPersistentTests::test_new_ghost_success_not_already_ghost_slot", "persistent/tests/test_persistence.py::CPersistentTests::test_p_invalidate_calls_p_deactivate", "persistent/tests/test_persistence.py::CPersistentTests::test_p_invalidate_with_slots_broken_jar", 
"persistent/tests/test_persistence.py::CPersistentTests::test_pickle_roundtrip_simple", "persistent/tests/test_persistence.py::CPersistentTests::test_pickle_roundtrip_w_getnewargs_and_getstate", "persistent/tests/test_persistence.py::CPersistentTests::test_pickle_roundtrip_w_slots_and_empty_dict", "persistent/tests/test_persistence.py::CPersistentTests::test_pickle_roundtrip_w_slots_and_filled_dict", "persistent/tests/test_persistence.py::CPersistentTests::test_pickle_roundtrip_w_slots_filled_slot", "persistent/tests/test_persistence.py::CPersistentTests::test_pickle_roundtrip_w_slots_missing_slot", "persistent/tests/test_persistence.py::CPersistentTests::test_query_p_changed_changed", "persistent/tests/test_persistence.py::CPersistentTests::test_query_p_changed_ghost", "persistent/tests/test_persistence.py::CPersistentTests::test_query_p_changed_saved", "persistent/tests/test_persistence.py::CPersistentTests::test_query_p_changed_unsaved", "persistent/tests/test_persistence.py::CPersistentTests::test_query_p_estimated_size_del", "persistent/tests/test_persistence.py::CPersistentTests::test_query_p_estimated_size_new", "persistent/tests/test_persistence.py::CPersistentTests::test_set__p_changed_w_broken_jar", "persistent/tests/test_persistence.py::CPersistentTests::test_setattr_in_subclass_is_not_called_creating_an_instance", "persistent/tests/test_persistence.py::CPersistentTests::test_w_alternate_metaclass", "persistent/tests/test_persistence.py::CPersistentTests::test_w_diamond_inheritance", "persistent/tests/test_persistence.py::Test_simple_new::test_w_non_type", "persistent/tests/test_persistence.py::Test_simple_new::test_w_type", "persistent/tests/test_persistence.py::test_suite", "persistent/tests/test_timestamp.py::Test__UTC::test_dst", "persistent/tests/test_timestamp.py::Test__UTC::test_fromutc", "persistent/tests/test_timestamp.py::Test__UTC::test_tzname", "persistent/tests/test_timestamp.py::Test__UTC::test_utcoffset", 
"persistent/tests/test_timestamp.py::pyTimeStampTests::test_comparisons_to_non_timestamps", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_ctor_from_elements", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_ctor_from_invalid_strings", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_ctor_from_string", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_ctor_from_string_non_zero", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_ctor_invalid_arglist", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_laterThan_invalid", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_laterThan_self_is_earlier", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_laterThan_self_is_later", "persistent/tests/test_timestamp.py::pyTimeStampTests::test_repr", "persistent/tests/test_timestamp.py::TimeStampTests::test_comparisons_to_non_timestamps", "persistent/tests/test_timestamp.py::TimeStampTests::test_ctor_from_elements", "persistent/tests/test_timestamp.py::TimeStampTests::test_ctor_from_invalid_strings", "persistent/tests/test_timestamp.py::TimeStampTests::test_ctor_from_string", "persistent/tests/test_timestamp.py::TimeStampTests::test_ctor_from_string_non_zero", "persistent/tests/test_timestamp.py::TimeStampTests::test_ctor_invalid_arglist", "persistent/tests/test_timestamp.py::TimeStampTests::test_laterThan_invalid", "persistent/tests/test_timestamp.py::TimeStampTests::test_laterThan_self_is_earlier", "persistent/tests/test_timestamp.py::TimeStampTests::test_laterThan_self_is_later", "persistent/tests/test_timestamp.py::TimeStampTests::test_repr", "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_equal", "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_hash_equal", "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_hash_equal_constants", "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_ordering", 
"persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_py_hash_32_64_bit", "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_raw_equal", "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_reprs_equal", "persistent/tests/test_timestamp.py::PyAndCComparisonTests::test_strs_equal", "persistent/tests/test_timestamp.py::test_suite" ]
[]
Zope Public License 2.1
2,853
[ "persistent/timestamp.py", "persistent/interfaces.py", "docs/using.rst", "CHANGES.rst", "persistent/cPersistence.c", "persistent/persistence.py" ]
[ "persistent/timestamp.py", "persistent/interfaces.py", "docs/using.rst", "CHANGES.rst", "persistent/cPersistence.c", "persistent/persistence.py" ]
nipy__nipype-2669
69dce12bdf256c3bb0a8b6da9a1d1cdce48b66ec
2018-08-02 04:22:30
24e52aa4229fc3ce7c3e3ac79b3317999d35f1d3
diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py index 9d0bc3c69..7f7afd392 100644 --- a/nipype/pipeline/engine/base.py +++ b/nipype/pipeline/engine/base.py @@ -36,11 +36,11 @@ class EngineBase(object): """ self._hierarchy = None - self._name = None + self.name = name + self._id = self.name # for compatibility with node expansion using iterables self.base_dir = base_dir self.config = deepcopy(config._sections) - self.name = name @property def name(self): @@ -66,6 +66,14 @@ class EngineBase(object): def outputs(self): raise NotImplementedError + @property + def itername(self): + """Name for expanded iterable""" + itername = self._id + if self._hierarchy: + itername = '%s.%s' % (self._hierarchy, self._id) + return itername + def clone(self, name): """Clone an EngineBase object @@ -95,6 +103,9 @@ class EngineBase(object): def __str__(self): return self.fullname + def __repr__(self): + return self.itername + def save(self, filename=None): if filename is None: filename = 'temp.pklz' diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py index 5ac9e72fa..af93fd140 100644 --- a/nipype/pipeline/engine/nodes.py +++ b/nipype/pipeline/engine/nodes.py @@ -159,7 +159,6 @@ class Node(EngineBase): self._got_inputs = False self._originputs = None self._output_dir = None - self._id = self.name # for compatibility with node expansion using iterables self.iterables = iterables self.synchronize = synchronize @@ -249,14 +248,6 @@ class Node(EngineBase): if hasattr(self._interface.inputs, 'num_threads'): self._interface.inputs.num_threads = self._n_procs - @property - def itername(self): - """Name for expanded iterable""" - itername = self._id - if self._hierarchy: - itername = '%s.%s' % (self._hierarchy, self._id) - return itername - def output_dir(self): """Return the location of the output directory for the node""" # Output dir is cached diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 
4ec36afe6..0bb5351ad 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -507,7 +507,7 @@ def _write_detailed_dot(graph, dotfilename): # write nodes edges = [] for n in nx.topological_sort(graph): - nodename = str(n) + nodename = n.itername inports = [] for u, v, d in graph.in_edges(nbunch=n, data=True): for cd in d['connect']: @@ -519,8 +519,8 @@ def _write_detailed_dot(graph, dotfilename): ipstrip = 'in%s' % _replacefunk(inport) opstrip = 'out%s' % _replacefunk(outport) edges.append( - '%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''), opstrip, - str(v).replace('.', ''), ipstrip)) + '%s:%s:e -> %s:%s:w;' % (u.itername.replace('.', ''), opstrip, + v.itername.replace('.', ''), ipstrip)) if inport not in inports: inports.append(inport) inputstr = ['{IN'] + [
incorrect detailed graphs being generated ### Summary the detailed graph is not listing names of nodes appropriately resulting in incorrect graphs. scroll down to see the multiple arrows containing nodes in the figures of the following notebook. https://miykael.github.io/nipype_tutorial/notebooks/introduction_quickstart.html an example here: ![image](https://user-images.githubusercontent.com/184063/43534422-50e73862-9585-11e8-83d9-697675db63e8.png) i suspect this has something to do with how `__repr__` and `__str__` are being used. @oesteban, @effigies - any quick ideas before i look into the code? ### Actual behavior ### Expected behavior should have printed the old style with multiple nodes. see cell 14 in this notebook: https://github.com/ReproNim/reproducible-imaging/blob/master/notebooks/introductory_dataflows.ipynb ### How to replicate the behavior run the current quickstart notebook in @miykael binder repo.
nipy/nipype
diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 151849241..44afbb2e2 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -441,6 +441,7 @@ def test_write_graph_runs(tmpdir): assert os.path.exists('graph.dot') or os.path.exists( 'graph_detailed.dot') + try: os.remove('graph.dot') except OSError: @@ -484,6 +485,164 @@ def test_deep_nested_write_graph_runs(tmpdir): pass +# examples of dot files used in the following test +dotfile_orig = ['strict digraph {\n', + '"mod1 (engine)";\n', + '"mod2 (engine)";\n', + '"mod1 (engine)" -> "mod2 (engine)";\n', + '}\n'] + +dotfile_detailed_orig = ['digraph structs {\n', + 'node [shape=record];\n', + 'pipemod1 [label="{IN}|{ mod1 | engine | }|{OUT|<outoutput1> output1}"];\n', + 'pipemod2 [label="{IN|<ininput1> input1}|{ mod2 | engine | }|{OUT}"];\n', + 'pipemod1:outoutput1:e -> pipemod2:ininput1:w;\n', + '}'] + + +dotfile_hierarchical = ['digraph pipe{\n', + ' label="pipe";\n', + ' pipe_mod1[label="mod1 (engine)"];\n', + ' pipe_mod2[label="mod2 (engine)"];\n', + ' pipe_mod1 -> pipe_mod2;\n', + '}'] + +dotfile_colored = ['digraph pipe{\n', + ' label="pipe";\n', + ' pipe_mod1[label="mod1 (engine)", style=filled, fillcolor="#FFFFC8"];\n', + ' pipe_mod2[label="mod2 (engine)", style=filled, fillcolor="#FFFFC8"];\n', + ' pipe_mod1 -> pipe_mod2;\n', + '}'] + +dotfiles = { + "orig": dotfile_orig, + "flat": dotfile_orig, + "exec": dotfile_orig, + "hierarchical": dotfile_hierarchical, + "colored": dotfile_colored + } + [email protected]("simple", [True, False]) [email protected]("graph_type", ['orig', 'flat', 'exec', 'hierarchical', 'colored']) +def test_write_graph_dotfile(tmpdir, graph_type, simple): + """ checking dot files for a workflow without iterables""" + tmpdir.chdir() + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod2 = pe.Node(interface=EngineTestInterface(), 
name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.write_graph( + graph2use=graph_type, simple_form=simple, format='dot') + + with open("graph.dot") as f: + graph_str = f.read() + + if simple: + for line in dotfiles[graph_type]: + assert line in graph_str + else: + # if simple=False graph.dot uses longer names + for line in dotfiles[graph_type]: + if graph_type in ["hierarchical", "colored"]: + assert line.replace("mod1 (engine)", "mod1.EngineTestInterface.engine").replace( + "mod2 (engine)", "mod2.EngineTestInterface.engine") in graph_str + else: + assert line.replace( + "mod1 (engine)", "pipe.mod1.EngineTestInterface.engine").replace( + "mod2 (engine)", "pipe.mod2.EngineTestInterface.engine") in graph_str + + # graph_detailed is the same for orig, flat, exec (if no iterables) + # graph_detailed is not created for hierachical or colored + if graph_type not in ["hierarchical", "colored"]: + with open("graph_detailed.dot") as f: + graph_str = f.read() + for line in dotfile_detailed_orig: + assert line in graph_str + + +# examples of dot files used in the following test +dotfile_detailed_iter_exec = [ + 'digraph structs {\n', + 'node [shape=record];\n', + 'pipemod1aIa1 [label="{IN}|{ a1 | engine | mod1.aI }|{OUT|<outoutput1> output1}"];\n', + 'pipemod2a1 [label="{IN|<ininput1> input1}|{ a1 | engine | mod2 }|{OUT}"];\n', + 'pipemod1aIa0 [label="{IN}|{ a0 | engine | mod1.aI }|{OUT|<outoutput1> output1}"];\n', + 'pipemod2a0 [label="{IN|<ininput1> input1}|{ a0 | engine | mod2 }|{OUT}"];\n', + 'pipemod1aIa0:outoutput1:e -> pipemod2a0:ininput1:w;\n', + 'pipemod1aIa1:outoutput1:e -> pipemod2a1:ininput1:w;\n', + '}'] + +dotfile_iter_hierarchical = [ + 'digraph pipe{\n', + ' label="pipe";\n', + ' pipe_mod1[label="mod1 (engine)", shape=box3d,style=filled, color=black, colorscheme=greys7 fillcolor=2];\n', + ' pipe_mod2[label="mod2 (engine)"];\n', + ' pipe_mod1 -> pipe_mod2;\n', + '}'] + +dotfile_iter_colored = [ + 'digraph pipe{\n', + ' 
label="pipe";\n', + ' pipe_mod1[label="mod1 (engine)", shape=box3d,style=filled, color=black, colorscheme=greys7 fillcolor=2];\n', + ' pipe_mod2[label="mod2 (engine)", style=filled, fillcolor="#FFFFC8"];\n', + ' pipe_mod1 -> pipe_mod2;\n', + '}'] + +dotfiles_iter = { + "orig": dotfile_orig, + "flat": dotfile_orig, + "exec": dotfile_orig, + "hierarchical": dotfile_iter_hierarchical, + "colored": dotfile_iter_colored + } + +dotfiles_detailed_iter = { + "orig": dotfile_detailed_orig, + "flat": dotfile_detailed_orig, + "exec": dotfile_detailed_iter_exec + } + [email protected]("simple", [True, False]) [email protected]("graph_type", ['orig', 'flat', 'exec', 'hierarchical', 'colored']) +def test_write_graph_dotfile_iterables(tmpdir, graph_type, simple): + """ checking dot files for a workflow with iterables""" + tmpdir.chdir() + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod1.iterables = ('input1', [1, 2]) + mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.write_graph( + graph2use=graph_type, simple_form=simple, format='dot') + + with open("graph.dot") as f: + graph_str = f.read() + + if simple: + for line in dotfiles_iter[graph_type]: + assert line in graph_str + else: + # if simple=False graph.dot uses longer names + for line in dotfiles_iter[graph_type]: + if graph_type in ["hierarchical", "colored"]: + assert line.replace("mod1 (engine)", "mod1.EngineTestInterface.engine").replace( + "mod2 (engine)", "mod2.EngineTestInterface.engine") in graph_str + else: + assert line.replace( + "mod1 (engine)", "pipe.mod1.EngineTestInterface.engine").replace( + "mod2 (engine)", "pipe.mod2.EngineTestInterface.engine") in graph_str + + # graph_detailed is not created for hierachical or colored + if graph_type not in ["hierarchical", "colored"]: + with open("graph_detailed.dot") as f: + graph_str = f.read() + for line in 
dotfiles_detailed_iter[graph_type]: + assert line in graph_str + + + def test_io_subclass(): """Ensure any io subclass allows dynamic traits""" from nipype.interfaces.io import IOBase
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 3 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 codecov==2.1.13 configparser==5.2.0 coverage==6.2 cycler==0.11.0 decorator==4.4.2 docutils==0.18.1 execnet==1.9.0 funcsigs==1.0.2 future==1.0.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 isodate==0.6.1 Jinja2==3.0.3 kiwisolver==1.3.1 lxml==5.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mock==5.2.0 networkx==2.5.1 nibabel==3.2.2 -e git+https://github.com/nipy/nipype.git@69dce12bdf256c3bb0a8b6da9a1d1cdce48b66ec#egg=nipype numpy==1.19.5 numpydoc==1.1.0 packaging==21.3 Pillow==8.4.0 pluggy==1.0.0 prov==1.5.0 py==1.11.0 pydot==1.4.2 pydotplus==2.0.2 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-env==0.6.2 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 rdflib==5.0.0 requests==2.27.1 scipy==1.5.4 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 traits==6.4.1 typing_extensions==4.1.1 urllib3==1.26.20 yapf==0.32.0 zipp==3.6.0
name: nipype channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - click==8.0.4 - codecov==2.1.13 - configparser==5.2.0 - coverage==6.2 - cycler==0.11.0 - decorator==4.4.2 - docutils==0.18.1 - execnet==1.9.0 - funcsigs==1.0.2 - future==1.0.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isodate==0.6.1 - jinja2==3.0.3 - kiwisolver==1.3.1 - lxml==5.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mock==5.2.0 - networkx==2.5.1 - nibabel==3.2.2 - numpy==1.19.5 - numpydoc==1.1.0 - packaging==21.3 - pillow==8.4.0 - pluggy==1.0.0 - prov==1.5.0 - py==1.11.0 - pydot==1.4.2 - pydotplus==2.0.2 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-env==0.6.2 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - rdflib==5.0.0 - requests==2.27.1 - scipy==1.5.4 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - traits==6.4.1 - typing-extensions==4.1.1 - urllib3==1.26.20 - yapf==0.32.0 - zipp==3.6.0 prefix: /opt/conda/envs/nipype
[ "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[exec-True]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[exec-False]" ]
[ "nipype/pipeline/engine/tests/test_engine.py::test_parameterize_dirs_false" ]
[ "nipype/pipeline/engine/tests/test_engine.py::test_2mods[iterables0-expected0]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[hierarchical-True]", "nipype/pipeline/engine/tests/test_engine.py::test_1mod[iterables0-expected0]", "nipype/pipeline/engine/tests/test_engine.py::test_io_subclass", "nipype/pipeline/engine/tests/test_engine.py::test_iterable_expansion", "nipype/pipeline/engine/tests/test_engine.py::test_synchronize_expansion", "nipype/pipeline/engine/tests/test_engine.py::test_3mods[iterables1-expected1-connect1]", "nipype/pipeline/engine/tests/test_engine.py::test_3mods[iterables0-expected0-connect0]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[colored-False]", "nipype/pipeline/engine/tests/test_engine.py::test_itersource_synchronize1_expansion", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[hierarchical-False]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[hierarchical-False]", "nipype/pipeline/engine/tests/test_engine.py::test_expansion", "nipype/pipeline/engine/tests/test_engine.py::test_itersource_expansion", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[colored-True]", "nipype/pipeline/engine/tests/test_engine.py::test_3mods[iterables2-expected2-connect2]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[colored-True]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[exec-False]", "nipype/pipeline/engine/tests/test_engine.py::test_old_config", "nipype/pipeline/engine/tests/test_engine.py::test_2mods[iterables1-expected1]", "nipype/pipeline/engine/tests/test_engine.py::test_itersource_synchronize2_expansion", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[flat-False]", "nipype/pipeline/engine/tests/test_engine.py::test_1mod[iterables1-expected1]", 
"nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[colored-False]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[hierarchical-True]", "nipype/pipeline/engine/tests/test_engine.py::test_2mods[iterables2-expected2]", "nipype/pipeline/engine/tests/test_engine.py::test_synchronize_tuples_expansion", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[orig-False]", "nipype/pipeline/engine/tests/test_engine.py::test_mapnode_json", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[exec-True]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[orig-False]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[orig-True]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[orig-True]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[flat-True]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile_iterables[flat-False]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_dotfile[flat-True]", "nipype/pipeline/engine/tests/test_engine.py::test_write_graph_runs", "nipype/pipeline/engine/tests/test_engine.py::test_deep_nested_write_graph_runs", "nipype/pipeline/engine/tests/test_engine.py::test_serial_input" ]
[]
Apache License 2.0
2,855
[ "nipype/pipeline/engine/utils.py", "nipype/pipeline/engine/base.py", "nipype/pipeline/engine/nodes.py" ]
[ "nipype/pipeline/engine/utils.py", "nipype/pipeline/engine/base.py", "nipype/pipeline/engine/nodes.py" ]
nipy__nipype-2673
69dce12bdf256c3bb0a8b6da9a1d1cdce48b66ec
2018-08-02 16:39:58
24e52aa4229fc3ce7c3e3ac79b3317999d35f1d3
diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py index 4ec36afe6..cc47de5d4 100644 --- a/nipype/pipeline/engine/utils.py +++ b/nipype/pipeline/engine/utils.py @@ -233,15 +233,78 @@ def write_report(node, report_type=None, is_mapnode=False): return +def _identify_collapses(hastraits): + """ Identify traits that will collapse when being set to themselves. + + ``OutputMultiObject``s automatically unwrap a list of length 1 to directly + reference the element of that list. + If that element is itself a list of length 1, then the following will + result in modified values. + + hastraits.trait_set(**hastraits.trait_get()) + + Cloning performs this operation on a copy of the original traited object, + allowing us to identify traits that will be affected. + """ + raw = hastraits.trait_get() + cloned = hastraits.clone_traits().trait_get() + + collapsed = set() + for key in cloned: + orig = raw[key] + new = cloned[key] + # Allow numpy to handle the equality checks, as mixed lists and arrays + # can be problematic. + if isinstance(orig, list) and len(orig) == 1 and ( + not np.array_equal(orig, new) and np.array_equal(orig[0], new)): + collapsed.add(key) + + return collapsed + + +def _uncollapse(indexable, collapsed): + """ Wrap collapsible values in a list to prevent double-collapsing. + + Should be used with _identify_collapses to provide the following + idempotent operation: + + collapsed = _identify_collapses(hastraits) + hastraits.trait_set(**_uncollapse(hastraits.trait_get(), collapsed)) + + NOTE: Modifies object in-place, in addition to returning it. 
+ """ + + for key in indexable: + if key in collapsed: + indexable[key] = [indexable[key]] + return indexable + + +def _protect_collapses(hastraits): + """ A collapse-protected replacement for hastraits.trait_get() + + May be used as follows to provide an idempotent trait_set: + + hastraits.trait_set(**_protect_collapses(hastraits)) + """ + collapsed = _identify_collapses(hastraits) + return _uncollapse(hastraits.trait_get(), collapsed) + + def save_resultfile(result, cwd, name): """Save a result pklz file to ``cwd``""" resultsfile = os.path.join(cwd, 'result_%s.pklz' % name) if result.outputs: try: - outputs = result.outputs.trait_get() + collapsed = _identify_collapses(result.outputs) + outputs = _uncollapse(result.outputs.trait_get(), collapsed) + # Double-protect tosave so that the original, uncollapsed trait + # is saved in the pickle file. Thus, when the loading process + # collapses, the original correct value is loaded. + tosave = _uncollapse(outputs.copy(), collapsed) except AttributeError: - outputs = result.outputs.dictcopy() # outputs was a bunch - result.outputs.set(**modify_paths(outputs, relative=True, basedir=cwd)) + tosave = outputs = result.outputs.dictcopy() # outputs was a bunch + result.outputs.set(**modify_paths(tosave, relative=True, basedir=cwd)) savepkl(resultsfile, result) logger.debug('saved results in %s', resultsfile) @@ -293,7 +356,7 @@ def load_resultfile(path, name): else: if result.outputs: try: - outputs = result.outputs.trait_get() + outputs = _protect_collapses(result.outputs) except AttributeError: outputs = result.outputs.dictcopy() # outputs == Bunch try:
Node de-listifies output lists with a single element ### Summary / Actual behavior A `Select` interface can return a single-element list, when that is the element selected. However, if placed in a `Node`, this will be unwrapped and the output will be the element itself. ### Expected behavior `Node` should not modify the outputs of an interface. ### How to replicate the behavior ```Python from nipype.pipeline import engine as pe from nipype.interfaces import utility as niu select_if = niu.Select(inlist=[[1, 2, 3], [4]], index=1) select_nd = pe.Node(niu.Select(inlist=[[1, 2, 3], [4]], index=1), name='select_nd') ifres = select_if.run() ndres = select_nd.run() assert ifres.outputs.out == [4] assert ndres.outputs.out == 4 ``` ### Platform details: ``` {'pkg_path': '/anaconda3/lib/python3.6/site-packages/nipype', 'commit_source': 'archive substitution', 'commit_hash': '%h', 'nipype_version': '1.1.0', 'sys_version': '3.6.5 |Anaconda, Inc.| (default, Apr 26 2018, 08:42:37) \n[GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)]', 'sys_executable': '/anaconda3/bin/python', 'sys_platform': 'darwin', 'numpy_version': '1.14.3', 'scipy_version': '1.1.0', 'networkx_version': '2.1', 'nibabel_version': '2.3.0', 'traits_version': '4.6.0'} 1.1.0 ``` ### Execution environment Choose one - My python environment outside container
nipy/nipype
diff --git a/nipype/pipeline/engine/tests/test_nodes.py b/nipype/pipeline/engine/tests/test_nodes.py index 4a04b9476..ea03fe69a 100644 --- a/nipype/pipeline/engine/tests/test_nodes.py +++ b/nipype/pipeline/engine/tests/test_nodes.py @@ -290,3 +290,18 @@ def test_inputs_removal(tmpdir): n1.overwrite = True n1.run() assert not tmpdir.join(n1.name, 'file1.txt').check() + + +def test_outputmultipath_collapse(tmpdir): + """Test an OutputMultiPath whose initial value is ``[[x]]`` to ensure that + it is returned as ``[x]``, regardless of how accessed.""" + select_if = niu.Select(inlist=[[1, 2, 3], [4]], index=1) + select_nd = pe.Node(niu.Select(inlist=[[1, 2, 3], [4]], index=1), + name='select_nd') + + ifres = select_if.run() + ndres = select_nd.run() + + assert ifres.outputs.out == [4] + assert ndres.outputs.out == [4] + assert select_nd.result.outputs.out == [4]
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 attrs==22.2.0 Babel==2.11.0 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 codecov==2.1.13 configparser==5.2.0 coverage==6.2 cycler==0.11.0 decorator==4.4.2 docutils==0.18.1 execnet==1.9.0 funcsigs==1.0.2 future==1.0.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 isodate==0.6.1 Jinja2==3.0.3 kiwisolver==1.3.1 lxml==5.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mock==5.2.0 networkx==2.5.1 nibabel==3.2.2 -e git+https://github.com/nipy/nipype.git@69dce12bdf256c3bb0a8b6da9a1d1cdce48b66ec#egg=nipype numpy==1.19.5 numpydoc==1.1.0 packaging==21.3 Pillow==8.4.0 pluggy==1.0.0 prov==1.5.0 py==1.11.0 pydot==1.4.2 pydotplus==2.0.2 Pygments==2.14.0 pyparsing==3.1.4 pytest==7.0.1 pytest-cov==4.0.0 pytest-env==0.6.2 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 pytz==2025.2 rdflib==5.0.0 requests==2.27.1 scipy==1.5.4 simplejson==3.20.1 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 tomli==1.2.3 traits==6.4.1 typing_extensions==4.1.1 urllib3==1.26.20 yapf==0.32.0 zipp==3.6.0
name: nipype channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - attrs==22.2.0 - babel==2.11.0 - charset-normalizer==2.0.12 - click==8.0.4 - codecov==2.1.13 - configparser==5.2.0 - coverage==6.2 - cycler==0.11.0 - decorator==4.4.2 - docutils==0.18.1 - execnet==1.9.0 - funcsigs==1.0.2 - future==1.0.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - isodate==0.6.1 - jinja2==3.0.3 - kiwisolver==1.3.1 - lxml==5.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mock==5.2.0 - networkx==2.5.1 - nibabel==3.2.2 - numpy==1.19.5 - numpydoc==1.1.0 - packaging==21.3 - pillow==8.4.0 - pluggy==1.0.0 - prov==1.5.0 - py==1.11.0 - pydot==1.4.2 - pydotplus==2.0.2 - pygments==2.14.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-cov==4.0.0 - pytest-env==0.6.2 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - pytz==2025.2 - rdflib==5.0.0 - requests==2.27.1 - scipy==1.5.4 - simplejson==3.20.1 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - tomli==1.2.3 - traits==6.4.1 - typing-extensions==4.1.1 - urllib3==1.26.20 - yapf==0.32.0 - zipp==3.6.0 prefix: /opt/conda/envs/nipype
[ "nipype/pipeline/engine/tests/test_nodes.py::test_outputmultipath_collapse" ]
[]
[ "nipype/pipeline/engine/tests/test_nodes.py::test_node_init", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_iterfield_type[3-f_exp0]", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_iterfield_type[x_inp1-f_exp1]", "nipype/pipeline/engine/tests/test_nodes.py::test_outputs_removal", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_iterfield_type[Str-f_exp4]", "nipype/pipeline/engine/tests/test_nodes.py::test_node_get_output", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_iterfield_check", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_expansion", "nipype/pipeline/engine/tests/test_nodes.py::test_inputs_removal", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_iterfield_type[x_inp3-f_exp3]", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_iterfield_type[x_inp2-f_exp2]", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_iterfield_type[x_inp5-f_exp5]", "nipype/pipeline/engine/tests/test_nodes.py::test_mapnode_nested", "nipype/pipeline/engine/tests/test_nodes.py::test_node_hash" ]
[]
Apache License 2.0
2,856
[ "nipype/pipeline/engine/utils.py" ]
[ "nipype/pipeline/engine/utils.py" ]
dwavesystems__dimod-240
8ebfffa42319aa4850cfc5a1c99a8711eac44722
2018-08-02 17:03:19
8ebfffa42319aa4850cfc5a1c99a8711eac44722
diff --git a/dimod/binary_quadratic_model.py b/dimod/binary_quadratic_model.py index 970163c3..3cbe679d 100644 --- a/dimod/binary_quadratic_model.py +++ b/dimod/binary_quadratic_model.py @@ -46,12 +46,12 @@ of a problem. """ from __future__ import absolute_import, division -from collections import Sized, Container, Iterable +from collections import Sized, Container, Iterable, OrderedDict from numbers import Number import numpy as np -from six import itervalues, iteritems, iterkeys, viewkeys +from six import itervalues, iteritems, iterkeys from dimod.decorators import vartype_argument from dimod.response import SampleView @@ -263,11 +263,6 @@ class BinaryQuadraticModel(Sized, Container, Iterable): def __iter__(self): return self.adj.__iter__() - @property - def variables(self): - """Returns BQM's variables as a dictionary view object.""" - return viewkeys(self.linear) - ################################################################################################## # vartype properties ################################################################################################## @@ -2200,3 +2195,37 @@ class BinaryQuadraticModel(Sized, Container, Iterable): bqm.add_interaction(u, v, 0.0) return bqm + + +class OrderedBinaryQuadraticModel(BinaryQuadraticModel): + """Consistently ordered variant of :class:`BinaryQuadraticModel`. + + Uses :class:`collections.OrderedDict` to store the linear and quadratic biases. Note that + :attr:`~.BinaryQuadraticModel.adj` remains unordered. + + Variables are ordered by insertion. This is well defined if adding the variable/interactions + singly, but not when constructed from unordered mappings like dicts. 
+ + Examples: + + >>> bqm = dimod.OrderedBinaryQuadraticModel.empty(dimod.SPIN) + >>> bqm.add_variable('a', .5) + >>> bqm.add_interaction('a', 'b', 1.5) + >>> bqm.linear + OrderedDict([('a', 0.5), ('b', 0.0)]) + >>> bqm.quadratic + OrderedDict([(('a', 'b'), 1.5)]) + + """ + @vartype_argument('vartype') + def __init__(self, linear, quadratic, offset, vartype, **kwargs): + self.linear = OrderedDict() + self.quadratic = OrderedDict() + self.adj = {} + self.offset = offset # we are agnostic to type, though generally should behave like a number + self.vartype = vartype + self.info = kwargs # any additional kwargs are kept as info (metadata) + + # add linear, quadratic + self.add_variables_from(linear) + self.add_interactions_from(quadratic) diff --git a/docs/reference/binary_quadratic_model.rst b/docs/reference/binary_quadratic_model.rst index a2d55fea..ac3d3ce4 100644 --- a/docs/reference/binary_quadratic_model.rst +++ b/docs/reference/binary_quadratic_model.rst @@ -108,3 +108,9 @@ Converting to other types BinaryQuadraticModel.to_numpy_vectors BinaryQuadraticModel.to_qubo BinaryQuadraticModel.to_pandas_dataframe + + +OrderedBinaryQuadraticModel +=========================== + +.. autoclass:: OrderedBinaryQuadraticModel
Allow user-specified mapping type for `BinaryQuadraticModel`s ``` bqm = dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype, mapping_type=OrderedDict) ``` This would affect `.linear` and `.quadratic`, possibly `.adj` as well.
dwavesystems/dimod
diff --git a/tests/test_binary_quadratic_model.py b/tests/test_binary_quadratic_model.py index de1211b3..958e1904 100644 --- a/tests/test_binary_quadratic_model.py +++ b/tests/test_binary_quadratic_model.py @@ -20,6 +20,8 @@ import itertools import tempfile import shutil import json +import collections + from os import path import jsonschema @@ -135,7 +137,6 @@ class TestBinaryQuadraticModel(unittest.TestCase): self.assertEqual(dimod.BinaryQuadraticModel(linear, quadratic, offset, 'BINARY').vartype, dimod.BINARY) - def test_construction_quadratic(self): linear = {v: v * .01 for v in range(10)} quadratic = {(u, v): u * v * .01 for u, v in itertools.combinations(linear, 2)} @@ -226,18 +227,6 @@ class TestBinaryQuadraticModel(unittest.TestCase): self.assertEqual(set(bqm), {'a', 'b'}) - def test_variables(self): - bqm = dimod.BinaryQuadraticModel.empty(dimod.BINARY) - - self.assertEqual(set(bqm.variables), set()) - - bqm.add_interaction('a', 'b', -1) - - self.assertEqual(set(bqm.variables), {'a', 'b'}) - self.assertIn('a', bqm.variables) - self.assertEqual(bqm.variables & {'b'}, {'b'}) - self.assertEqual(bqm.variables | {'c'}, {'a', 'b', 'c'}) - def test_add_variable(self): bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.SPIN) bqm.add_variable('a', .5) @@ -1887,3 +1876,18 @@ class TestConvert(unittest.TestCase): self.assertIn('tag', new_bqm.info) self.assertEqual(new_bqm.info['tag'], 5) self.assertIn(('a', "complex key"), new_bqm.linear) + + +class TestOrderedBQM(unittest.TestCase): + def test_construction(self): + bqm = dimod.OrderedBinaryQuadraticModel.empty(dimod.SPIN) + bqm.add_variable('a', .5) + bqm.add_interaction('c', 'a', 1.5) + bqm.add_interaction('a', 'c', -1.) + bqm.add_interaction('a', 'b', -1.) 
+ bqm.add_variables_from([('b', 1.0), ('c', 1.0)]) + bqm.add_interaction('e', 'd', 0.0) + + self.assertEqual(bqm.linear, collections.OrderedDict([('a', .5), ('c', 1.), ('b', 1.), ('e', 0.), ('d', 0.)])) + self.assertEqual(bqm.quadratic, + collections.OrderedDict([(('c', 'a'), .5), (('a', 'b'), -1.), (('e', 'd'), 0.0)]))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 2 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 decorator==5.1.1 -e git+https://github.com/dwavesystems/dimod.git@8ebfffa42319aa4850cfc5a1c99a8711eac44722#egg=dimod idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 jsonschema==2.6.0 mock==2.0.0 networkx==2.0 numpy==1.14.5 packaging==21.3 pandas==0.22.0 pbr==6.1.1 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.27.1 six==1.11.0 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: dimod channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - decorator==5.1.1 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jsonschema==2.6.0 - mock==2.0.0 - networkx==2.0 - numpy==1.14.5 - packaging==21.3 - pandas==0.22.0 - pbr==6.1.1 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.27.1 - six==1.11.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/dimod
[ "tests/test_binary_quadratic_model.py::TestOrderedBQM::test_construction" ]
[]
[ "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__contains__", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__eq__", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__iter__", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__len__", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__repr__", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interaction", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interaction_counterpart", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interactions_from", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_offset", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variable", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variable_counterpart", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variables_from", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_binary_property", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_binary_property_relabel", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_change_vartype", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_constract_variables", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction_quadratic", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction_vartype", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_copy", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_fix_variable", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_flip_variable", 
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_partial_relabel_copy", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_partial_relabel_inplace", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical_copy", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical_inplace", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_with_identity", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_with_overlap", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_interaction", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_interactions_from", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_offset", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_variable", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_variables_from", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_scale", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_spin_property", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_spin_property_relabel", "tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_update", "tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_file_BINARY", "tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_file_SPIN", "tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_file_empty_BINARY", "tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_file_empty_SPIN", "tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_string_BINARY", "tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_string_SPIN", 
"tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_string_empty_BINARY", "tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_string_empty_SPIN", "tests/test_binary_quadratic_model.py::TestConvert::test_coo_functional_two_digit_integers_string", "tests/test_binary_quadratic_model.py::TestConvert::test_empty", "tests/test_binary_quadratic_model.py::TestConvert::test_from_coo_file", "tests/test_binary_quadratic_model.py::TestConvert::test_from_coo_string", "tests/test_binary_quadratic_model.py::TestConvert::test_from_ising", "tests/test_binary_quadratic_model.py::TestConvert::test_from_numpy_matrix", "tests/test_binary_quadratic_model.py::TestConvert::test_from_numpy_vectors", "tests/test_binary_quadratic_model.py::TestConvert::test_from_numpy_vectors_labels", "tests/test_binary_quadratic_model.py::TestConvert::test_from_qubo", "tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json", "tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json_empty", "tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json_with_info", "tests/test_binary_quadratic_model.py::TestConvert::test_info", "tests/test_binary_quadratic_model.py::TestConvert::test_to_coo_string_empty_BINARY", "tests/test_binary_quadratic_model.py::TestConvert::test_to_coo_string_empty_SPIN", "tests/test_binary_quadratic_model.py::TestConvert::test_to_coo_string_typical_BINARY", "tests/test_binary_quadratic_model.py::TestConvert::test_to_coo_string_typical_SPIN", "tests/test_binary_quadratic_model.py::TestConvert::test_to_ising_binary_to_ising", "tests/test_binary_quadratic_model.py::TestConvert::test_to_ising_spin_to_ising", "tests/test_binary_quadratic_model.py::TestConvert::test_to_json_file", "tests/test_binary_quadratic_model.py::TestConvert::test_to_json_file_empty", "tests/test_binary_quadratic_model.py::TestConvert::test_to_json_string", 
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_string_empty", "tests/test_binary_quadratic_model.py::TestConvert::test_to_networkx_graph", "tests/test_binary_quadratic_model.py::TestConvert::test_to_numpy_matrix", "tests/test_binary_quadratic_model.py::TestConvert::test_to_numpy_vectors", "tests/test_binary_quadratic_model.py::TestConvert::test_to_numpy_vectors_labels_sorted", "tests/test_binary_quadratic_model.py::TestConvert::test_to_numpy_vectors_sorted", "tests/test_binary_quadratic_model.py::TestConvert::test_to_pandas_dataframe", "tests/test_binary_quadratic_model.py::TestConvert::test_to_qubo_binary_to_qubo", "tests/test_binary_quadratic_model.py::TestConvert::test_to_qubo_spin_to_qubo" ]
[]
Apache License 2.0
2,857
[ "dimod/binary_quadratic_model.py", "docs/reference/binary_quadratic_model.rst" ]
[ "dimod/binary_quadratic_model.py", "docs/reference/binary_quadratic_model.rst" ]
pennmem__cmlreaders-169
355bb312d51b4429738ea491b7cfea4d2fec490c
2018-08-02 17:07:52
355bb312d51b4429738ea491b7cfea4d2fec490c
diff --git a/cmlreaders/base_reader.py b/cmlreaders/base_reader.py index 08f4c49..9a6da5d 100644 --- a/cmlreaders/base_reader.py +++ b/cmlreaders/base_reader.py @@ -148,6 +148,7 @@ class BaseCMLReader(object, metaclass=_MetaReader): Subject code to use; required when we need to determine the protocol experiment session + data_type """ if subject is None: diff --git a/cmlreaders/readers/eeg.py b/cmlreaders/readers/eeg.py index 0c8dd9b..727e617 100644 --- a/cmlreaders/readers/eeg.py +++ b/cmlreaders/readers/eeg.py @@ -102,14 +102,49 @@ class BaseEEGReader(ABC): self.epochs = epochs self.scheme = scheme - self._unique_contacts = np.union1d( - self.scheme["contact_1"], - self.scheme["contact_2"] - ) if self.scheme is not None else None + try: + if self.scheme_type == "contacts": + self._unique_contacts = self.scheme.contact.unique() + elif self.scheme_type == "pairs": + self._unique_contacts = np.union1d( + self.scheme["contact_1"], + self.scheme["contact_2"] + ) + else: + self._unique_contacts = None + except KeyError: + self._unique_contacts = None # in cases where we can't rereference, this will get changed to False self.rereferencing_possible = True + @property + def scheme_type(self) -> Union[str, None]: + """Returns "contacts" when the input scheme is in the form of monopolar + contacts and "pairs" when bipolar. + + Returns + ------- + The scheme type or ``None`` is no scheme was specified. + + Raises + ------ + KeyError + When the passed scheme doesn't include any of the following keys: + ``contact_1``, ``contact_2``, ``contact`` + + """ + if self.scheme is None: + return None + + if "contact_1" in self.scheme and "contact_2" in self.scheme: + return "pairs" + elif "contact" in self.scheme: + return "contacts" + else: + raise KeyError("The passed scheme appears to be neither contacts " + "nor pairs") + def include_contact(self, contact_num: int): """Filter to determine if we need to include a contact number when reading data. 
@@ -126,7 +161,7 @@ class BaseEEGReader(ABC): def rereference(self, data: np.ndarray, contacts: List[int]) -> Tuple[np.ndarray, List[str]]: - """Attempt to rereference the EEG data using the specified scheme. + """Rereference and/or select a subset of raw channels. Parameters ---------- @@ -154,15 +189,22 @@ class BaseEEGReader(ABC): for i, c in enumerate(contacts) } - c1 = [contact_to_index[c] for c in self.scheme["contact_1"] - if c in contact_to_index] - c2 = [contact_to_index[c] for c in self.scheme["contact_2"] - if c in contact_to_index] + if self.scheme_type == "pairs": + c1 = [contact_to_index[c] for c in self.scheme["contact_1"] + if c in contact_to_index] + c2 = [contact_to_index[c] for c in self.scheme["contact_2"] + if c in contact_to_index] - reref = np.array( - [data[i, c1, :] - data[i, c2, :] for i in range(data.shape[0])] - ) - return reref, self.scheme["label"].tolist() + reref = np.array( + [data[i, c1, :] - data[i, c2, :] for i in range(data.shape[0])] + ) + return reref, self.scheme["label"].tolist() + else: + channels = [contact_to_index[c] for c in self.scheme["contact"]] + subset = np.array( + [data[i, channels, :] for i in range(data.shape[0])] + ) + return subset, self.scheme["label"].tolist() class NumpyEEGReader(BaseEEGReader): @@ -261,7 +303,7 @@ class RamulatorHDF5Reader(BaseEEGReader): passed scheme or if rereferencing is even possible in the first place. """ - if self.rereferencing_possible: + if self.rereferencing_possible or self.scheme_type == "contacts": return BaseEEGReader.rereference(self, data, contacts) with h5py.File(self.filename, 'r') as hfile:
contacts/pairs scheme input I'm still getting a bit of a handle how to use cmlreaders to load eeg vs using PTSA, and I have some questions about the `scheme` input. I see that if you enter a set of pairs, the code will automatically rereference the data, and the `channels` parameter will be defined by the pairs `label` column. However, is there a way to simply load a specific non-bipolar channel? In the PTSA eeg functions, as well as the old matlab eeg_toolbox, you could enter an arbitrary channel number and load that data. Also, if you don't enter a scheme, the `channels` parameter is just a list of incremented channel numbers, instead of the label from the json file. I guess another way of summarizing what I'm asking is, given that you can load pairs.json and pass those as a scheme to `load_eeg`, could we similarly load contacts.json, filter it to any subset, and pass those in as a scheme?
pennmem/cmlreaders
diff --git a/cmlreaders/test/test_eeg.py b/cmlreaders/test/test_eeg.py index a870afa..acfd8fd 100644 --- a/cmlreaders/test/test_eeg.py +++ b/cmlreaders/test/test_eeg.py @@ -24,6 +24,11 @@ from cmlreaders.test.utils import patched_cmlreader from cmlreaders.warnings import MissingChannelsWarning +class DummyReader(BaseEEGReader): + def read(self): + return + + @pytest.fixture def events(): with patched_cmlreader(): @@ -51,18 +56,18 @@ class TestEEGMetaReader: class TestBaseEEGReader: + @staticmethod + def make_reader(scheme=None): + return DummyReader("", np.int16, [(0, None)], scheme=scheme) + @pytest.mark.parametrize("use_scheme", [True, False]) def test_include_contact(self, use_scheme): - class DummyReader(BaseEEGReader): - def read(self): - return - scheme = pd.DataFrame({ "contact_1": list(range(1, 10)), "contact_2": list(range(2, 11)), }) if use_scheme else None - reader = DummyReader("", np.int16, [(0, None)], scheme=scheme) + reader = self.make_reader(scheme) if use_scheme: assert len(reader._unique_contacts) == 10 @@ -73,6 +78,28 @@ class TestBaseEEGReader: else: assert not reader.include_contact(i) + @pytest.mark.parametrize("filename,expected", [ + (resource_filename("cmlreaders.test.data", "contacts.json"), "contacts"), + (resource_filename("cmlreaders.test.data", "pairs.json"), "pairs"), + ("", None) + ]) + def test_scheme_type(self, filename, expected): + if len(filename): + scheme = MontageReader.fromfile(filename, data_type=expected) + else: + scheme = pd.DataFrame({"foo": [1, 2, 3]}) + + reader = self.make_reader(scheme) + + if expected is None: + with pytest.raises(KeyError): + _ = reader.scheme_type # noqa + + reader = self.make_reader() + assert reader.scheme_type is None + else: + assert reader.scheme_type == expected + class TestFileReaders: def get_finder(self, subject, experiment, session, rootdir): @@ -244,12 +271,14 @@ class TestEEGReader: with pytest.raises(ErrorType): reader.load_eeg(events=word_events) - 
@pytest.mark.parametrize("subject,reref_possible,index,channel", [ - ("R1384J", False, 43, "LS12-LS1"), - ("R1111M", True, 43, "LPOG23-LPOG31"), + @pytest.mark.parametrize("subject,scheme_type,reref_possible,index,channel", [ + ("R1384J", "pairs", False, 43, "LS12-LS1"), + ("R1111M", "pairs", True, 43, "LPOG23-LPOG31"), + ("R1111M", "contacts", True, 43, "LPOG44"), + ("R1286J", "contacts", True, 43, "LJ16") ]) - def test_rereference(self, subject, reref_possible, index, channel, - rhino_root): + def test_rereference(self, subject, scheme_type, reref_possible, index, + channel, rhino_root): reader = CMLReader(subject=subject, experiment='FR1', session=0, rootdir=rhino_root) rate = reader.load("sources")["sample_rate"] @@ -259,22 +288,28 @@ class TestEEGReader: rel_start, rel_stop = 0, 100 expected_samples = int(rate * rel_stop / 1000) - scheme = reader.load('pairs') + scheme = reader.load(scheme_type) load_eeg = partial(reader.load_eeg, events=events, rel_start=rel_start, rel_stop=rel_stop) - if reref_possible: - data = load_eeg() - assert data.shape == (1, 100, expected_samples) + if scheme_type == "pairs": + if reref_possible: + data = load_eeg() + assert data.shape == (1, 100, expected_samples) + data = load_eeg(scheme=scheme) + assert data.shape == (1, 141, expected_samples) + assert data.channels[index] == channel + else: + data_noreref = load_eeg() + data_reref = load_eeg(scheme=scheme) + assert_equal(data_noreref.data, data_reref.data) + assert data_reref.channels[index] == channel + else: data = load_eeg(scheme=scheme) - assert data.shape == (1, 141, expected_samples) + count = 100 if subject == "R1111M" else 124 + assert data.shape == (1, count, expected_samples) assert data.channels[index] == channel - else: - data_noreref = load_eeg() - data_reref = load_eeg(scheme=scheme) - assert_equal(data_noreref.data, data_reref.data) - assert data_reref.channels[index] == channel @pytest.mark.rhino 
@pytest.mark.parametrize("subject,region_key,region_name,expected_channels,tlen", [
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": null, "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 cached-property==1.5.2 certifi==2021.5.30 charset-normalizer==2.0.12 -e git+https://github.com/pennmem/cmlreaders.git@355bb312d51b4429738ea491b7cfea4d2fec490c#egg=cmlreaders codecov==2.1.13 coverage==6.2 cycler==0.11.0 decorator==5.1.1 defusedxml==0.7.1 docutils==0.18.1 entrypoints==0.4 flake8==3.9.2 h5py==3.1.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 ipython-genutils==0.2.0 Jinja2==3.0.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mccabe==0.6.1 mistune==0.8.4 mne==0.23.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.8.8 nest-asyncio==1.6.0 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 Pillow==8.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.7.0 pyflakes==2.3.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 webencodings==0.5.1 zipp==3.6.0
name: cmlreaders channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cached-property==1.5.2 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - cycler==0.11.0 - decorator==5.1.1 - defusedxml==0.7.1 - docutils==0.18.1 - entrypoints==0.4 - flake8==3.9.2 - h5py==3.1.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - ipython-genutils==0.2.0 - jinja2==3.0.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mccabe==0.6.1 - mistune==0.8.4 - mne==0.23.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.8.8 - nest-asyncio==1.6.0 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - pillow==8.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.7.0 - pyflakes==2.3.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - 
sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cmlreaders
[ "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_scheme_type[/cmlreaders/cmlreaders/test/data/contacts.json-contacts]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_scheme_type[/cmlreaders/cmlreaders/test/data/pairs.json-pairs]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_scheme_type[-None]" ]
[ "cmlreaders/test/test_eeg.py::TestFileReaders::test_split_eeg_reader", "cmlreaders/test/test_eeg.py::TestFileReaders::test_split_eeg_reader_missing_contacts", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader_rhino[R1345D-FR1-0]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader_rhino[R1363T-FR1-0]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader_rhino[R1392N-PAL1-0]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_rereference", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader[R1298E-87-CH88]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader[R1387E-13-CH14]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_read_whole_session[R1161E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader_with_events[R1161E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader_with_events[R1387E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1384J-pairs-False-43-LS12-LS1]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1111M-pairs-True-43-LPOG23-LPOG31]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1111M-contacts-True-43-LPOG44]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1286J-contacts-True-43-LJ16]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1384J-ind.region-insula-10-200]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1288P-ind.region-lateralorbitofrontal-5-200]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1111M-ind.region-middletemporal-18-100]", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[RamulatorHDF5Reader-True]", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[RamulatorHDF5Reader-False]", "cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_multisession[subjects0-experiments0]", 
"cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_multisession[subjects1-experiments1]", "cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_multisession[subjects2-experiments2]", "cmlreaders/test/test_eeg.py::TestLoadEEG::test_channel_discrepancies[R1387E-catFR5-0-120-125]" ]
[ "cmlreaders/test/test_eeg.py::TestEEGMetaReader::test_load[R1389J-sources.json-int16-1641165-1000]", "cmlreaders/test/test_eeg.py::TestEEGMetaReader::test_load[TJ001-TJ001_pyFR_params.txt-int16-None-400.0]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_include_contact[True]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_include_contact[False]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_npy_reader", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_absolute[TJ001-TJ001_events.mat-expected_basenames0]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_absolute[R1389J-task_events.json-expected_basenames1]", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[SplitEEGReader-True]", "cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_with_empty_events" ]
[]
null
2,858
[ "cmlreaders/readers/eeg.py", "cmlreaders/base_reader.py" ]
[ "cmlreaders/readers/eeg.py", "cmlreaders/base_reader.py" ]
scieloorg__xylose-158
135018ec4be1a30320d1f59caee922ee4a730f41
2018-08-02 17:20:25
6bb32ebe34da88381518e84790963315b54db9c8
diff --git a/xylose/scielodocument.py b/xylose/scielodocument.py index da4d426..f3559c9 100644 --- a/xylose/scielodocument.py +++ b/xylose/scielodocument.py @@ -1,7 +1,6 @@ # encoding: utf-8 import sys from functools import wraps -import warnings import re import unicodedata import datetime @@ -2826,80 +2825,33 @@ class Citation(object): if self.publication_type == 'article': return self.data.get('v35', None) - @property - def analytic_institution_authors(self): - """ - It retrieves the analytic institution authors of a reference, - no matter the publication type of the reference. - It is not desirable to restrict the conditioned return to the - publication type, because some reference standards are very peculiar - and not only articles or books have institution authors. - IT REPLACES analytic_institution - """ - institutions = [] - for institution in self.data.get('v11', []): - institutions.append(html_decode(institution['_'])) - if len(institutions) > 0: - return institutions @property def analytic_institution(self): """ This method retrieves the institutions in the given citation. The citation must be an article or book citation, if it exists. - IT WILL BE DEPRECATED """ - warn_future_deprecation( - 'analytic_institution', - 'analytic_institution_authors', - 'analytic_institution_authors is more suitable name and ' - 'returns the authors independending on publication type' - ) institutions = [] - if self.publication_type in [u'article', u'book']: + if self.publication_type in [u'article', u'book'] and 'v11' in self.data: if 'v11' in self.data: for institution in self.data['v11']: - institutions.append(html_decode(institution['_'])) + institutions.append(html_decode(self.data['v11'][0]['_'])) if len(institutions) > 0: return institutions - @property - def monographic_institution_authors(self): - """ - It retrieves the monographic institution authors of a reference, - no matter the publication type of the reference. 
- It is not desirable to restrict the conditioned return to the - publication type, because some reference standards are very peculiar - and not only books have institution authors. - IT REPLACES monographic_institution - """ - if 'v30' in self.data: - return - institutions = [] - for institution in self.data.get('v17', []): - institutions.append(html_decode(institution['_'])) - if len(institutions) > 0: - return institutions - @property def monographic_institution(self): """ This method retrieves the institutions in the given citation. The citation must be a book citation, if it exists. - IT WILL BE DEPRECATED """ - warn_future_deprecation( - 'monographic_institution', - 'monographic_institution_authors', - 'monographic_institution_authors is more suitable name and ' - 'returns the authors independending on publication type' - ) institutions = [] if self.publication_type == u'book' and 'v17' in self.data: if 'v17' in self.data: for institution in self.data['v17']: - institutions.append(html_decode(institution['_'])) + institutions.append(html_decode(self.data['v17'][0]['_'])) if len(institutions) > 0: return institutions @@ -3063,13 +3015,40 @@ class Citation(object): ma = self.monographic_authors or [] return aa + ma + @property + def analytic_person_authors(self): + """ + It retrieves the analytic person authors of a reference, + no matter the publication type of the reference. + It is not desirable to restrict the conditioned return to the + publication type, because some reference standards are very peculiar + and not only articles or books have person authors. 
+ IT REPLACES analytic_authors + """ + authors = [] + for author in self.data.get('v10', []): + authordict = {} + if 's' in author: + authordict['surname'] = html_decode(author['s']) + if 'n' in author: + authordict['given_names'] = html_decode(author['n']) + if 's' in author or 'n' in author: + authors.append(authordict) + if len(authors) > 0: + return authors + @property def analytic_authors(self): """ This method retrieves the authors of the given citation. These authors may correspond to an article, book analytic, link or thesis. + IT WILL BE DEPRECATED. Use analytic_person_authors instead """ - + warn_future_deprecation( + 'analytic_authors', + 'analytic_person_authors', + 'analytic_person_authors is more suitable name' + ) authors = [] if 'v10' in self.data: for author in self.data['v10']: @@ -3084,13 +3063,41 @@ class Citation(object): if len(authors) > 0: return authors + @property + def monographic_person_authors(self): + """ + It retrieves the monographic person authors of a reference, + no matter the publication type of the reference. + It is not desirable to restrict the conditioned return to the + publication type, because some reference standards are very peculiar + and not only articles or books have person authors. + IT REPLACES monographic_authors + """ + authors = [] + for author in self.data.get('v16', []): + authordict = {} + if 's' in author: + authordict['surname'] = html_decode(author['s']) + if 'n' in author: + authordict['given_names'] = html_decode(author['n']) + if 's' in author or 'n' in author: + authors.append(authordict) + if len(authors) > 0: + return authors + @property def monographic_authors(self): """ - This method retrieves the authors of the given book citation. These authors may + This method retrieves the authors of the given book citation. + These authors may correspond to a book monography citation. + IT WILL BE DEPRECATED. Use monographic_person_authors instead. 
""" - + warn_future_deprecation( + 'monographic_authors', + 'monographic_person_authors', + 'monographic_person_authors is more suitable name' + ) authors = [] if 'v16' in self.data: for author in self.data['v16']:
[referências] Trocar o nome dos atributos analytic_authors e monographic_authors - De analytic_authors para analytic_person_authors - De monographic_authors para monographic_person_authors Indicar obsolescência ``` @property def analytic_authors(self): """ This method retrieves the authors of the given citation. These authors may correspond to an article, book analytic, link or thesis. """ authors = [] if 'v10' in self.data: for author in self.data['v10']: authordict = {} if 's' in author: authordict['surname'] = html_decode(author['s']) if 'n' in author: authordict['given_names'] = html_decode(author['n']) if 's' in author or 'n' in author: authors.append(authordict) if len(authors) > 0: return authors @property def monographic_authors(self): """ This method retrieves the authors of the given book citation. These authors may correspond to a book monography citation. """ authors = [] if 'v16' in self.data: for author in self.data['v16']: authordict = {} if 's' in author: authordict['surname'] = html_decode(author['s']) if 'n' in author: authordict['given_names'] = html_decode(author['n']) if 's' in author or 'n' in author: authors.append(authordict) if len(authors) > 0: return authors ```
scieloorg/xylose
diff --git a/tests/test_document.py b/tests/test_document.py index b2b37dd..464098b 100644 --- a/tests/test_document.py +++ b/tests/test_document.py @@ -4136,106 +4136,20 @@ class CitationTest(unittest.TestCase): json_citation['v30'] = [{u'_': u'It is the journal title'}] json_citation['v12'] = [{u'_': u'It is the article title'}] json_citation['v11'] = [{u'_': u'Article Institution'}] - json_citation['v11'] = [ - {u'_': u'Article Institution'}, - {u'_': u'Article Institution 2'}, - ] citation = Citation(json_citation) - self.assertEqual( - citation.analytic_institution, - [u'Article Institution', u'Article Institution 2'] - ) + + self.assertEqual(citation.analytic_institution, [u'Article Institution']) def test_analytic_institution_for_a_book_citation(self): json_citation = {} json_citation['v18'] = [{u'_': u'It is the book title'}] - json_citation['v11'] = [ - {u'_': u'Book Institution'}, - {u'_': u'Book Institution 2'}, - ] - - citation = Citation(json_citation) - - self.assertEqual( - citation.analytic_institution, - [u'Book Institution', u'Book Institution 2'] - ) - - def test_pending_deprecation_warning_of_analytic_institution(self): - citation = Citation({}) - with warnings.catch_warnings(record=True) as w: - items = citation.analytic_institution - assert items is None - assert len(w) == 1 - assert issubclass(w[-1].category, PendingDeprecationWarning) - - def test_pending_deprecation_warning_of_monographic_institution(self): - citation = Citation({}) - with warnings.catch_warnings(record=True) as w: - items = citation.monographic_institution - assert items is None - assert len(w) == 1 - assert issubclass(w[-1].category, PendingDeprecationWarning) + json_citation['v11'] = [{u'_': u'Book Institution'}] - def test_analytic_institution_authors_for_an_article_citation(self): - json_citation = {} - - json_citation['v30'] = [{u'_': u'It is the journal title'}] - json_citation['v12'] = [{u'_': u'It is the article title'}] - json_citation['v11'] = [{u'_': u'Article 
Institution'}] - json_citation['v11'] = [ - {u'_': u'Article Institution'}, - {u'_': u'Article Institution 2'}, - ] citation = Citation(json_citation) - self.assertEqual( - citation.analytic_institution_authors, - [u'Article Institution', u'Article Institution 2'] - ) - def test_analytic_institution_authors_for_a_book_citation(self): - json_citation = {} - - json_citation['v18'] = [{u'_': u'It is the book title'}] - json_citation['v11'] = [ - {u'_': u'Book Institution'}, - {u'_': u'Book Institution 2'}, - ] - citation = Citation(json_citation) - self.assertEqual( - citation.analytic_institution_authors, - [u'Book Institution', u'Book Institution 2'] - ) - - def test_monographic_institution_authors_for_an_article_citation(self): - json_citation = {} - - json_citation['v30'] = [{u'_': u'It is the journal title'}] - json_citation['v12'] = [{u'_': u'It is the article title'}] - json_citation['v17'] = [ - {u'_': u'Article Institution'}, - {u'_': u'Article Institution 2'}, - ] - citation = Citation(json_citation) - self.assertEqual( - citation.monographic_institution_authors, - None - ) - - def test_monographic_institution_authors_for_a_book_citation(self): - json_citation = {} - - json_citation['v18'] = [{u'_': u'It is the book title'}] - json_citation['v17'] = [ - {u'_': u'Book Institution'}, - ] - citation = Citation(json_citation) - self.assertEqual( - citation.monographic_institution_authors, - [u'Book Institution'] - ) + self.assertEqual(citation.analytic_institution, [u'Book Institution']) def test_thesis_institution(self): json_citation = {} @@ -4386,6 +4300,112 @@ class CitationTest(unittest.TestCase): self.assertEqual(citation.authors, []) + def test_analytic_person_authors(self): + json_citation = {} + + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + json_citation['v10'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', 
u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + + expected = [{u'given_names': u'Mike', u'surname': u'Sullivan'}, + {u'given_names': u'Rubin', u'surname': u'Hurricane Carter'}, + {u'given_names': u'Adilson', u'surname': u'Maguila Rodrigues'}, + {u'given_names': u'Acelino Popó Freitas'}, + {u'surname': u'Zé Marreta'}] + + citation = Citation(json_citation) + + self.assertEqual(citation.analytic_person_authors, expected) + + def test_without_analytic_person_authors(self): + json_citation = {} + + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + + citation = Citation(json_citation) + + self.assertEqual(citation.analytic_person_authors, None) + + def test_without_analytic_person_authors_but_not_a_book_citation(self): + json_citation = {} + + json_citation['v30'] = [{u'_': u'It is the journal title'}] + json_citation['v12'] = [{u'_': u'It is the article title'}] + json_citation['v10'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + + expected = [{u'given_names': u'Mike', u'surname': u'Sullivan'}, + {u'given_names': u'Rubin', u'surname': u'Hurricane Carter'}, + {u'given_names': u'Adilson', u'surname': u'Maguila Rodrigues'}, + {u'given_names': u'Acelino Popó Freitas'}, + {u'surname': u'Zé Marreta'}] + + citation = Citation(json_citation) + + self.assertEqual(citation.analytic_person_authors, expected) + + def test_monographic_person_authors(self): + json_citation = {} + + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + + expected = [{u'given_names': u'Mike', u'surname': u'Sullivan'}, + 
{u'given_names': u'Rubin', u'surname': u'Hurricane Carter'}, + {u'given_names': u'Adilson', u'surname': u'Maguila Rodrigues'}, + {u'given_names': u'Acelino Popó Freitas'}, + {u'surname': u'Zé Marreta'}] + + citation = Citation(json_citation) + + self.assertEqual(citation.monographic_person_authors, expected) + + def test_without_monographic_person_authors(self): + json_citation = {} + + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v16'] = [] + + citation = Citation(json_citation) + + self.assertEqual(citation.monographic_person_authors, None) + + def test_without_monographic_person_authors_but_not_a_book_citation(self): + json_citation = {} + + json_citation['v30'] = [{u'_': u'It is the journal title'}] + json_citation['v12'] = [{u'_': u'It is the article title'}] + + citation = Citation(json_citation) + + self.assertEqual(citation.monographic_person_authors, None) + + def test_pending_deprecation_warning_of_analytic_authors(self): + citation = Citation({}) + with warnings.catch_warnings(record=True) as w: + assert citation.analytic_authors is None + assert len(w) == 1 + assert issubclass(w[-1].category, PendingDeprecationWarning) + + def test_pending_deprecation_warning_of_monographic_authors(self): + citation = Citation({}) + with warnings.catch_warnings(record=True) as w: + self.assertEqual(citation.monographic_authors, None) + assert len(w) == 1 + assert issubclass(w[-1].category, PendingDeprecationWarning) + def test_monographic_authors(self): json_citation = {}
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
1.31
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work legendarium==2.0.6 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work -e git+https://github.com/scieloorg/xylose.git@135018ec4be1a30320d1f59caee922ee4a730f41#egg=xylose
name: xylose channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - legendarium==2.0.6 prefix: /opt/conda/envs/xylose
[ "tests/test_document.py::CitationTest::test_analytic_person_authors", "tests/test_document.py::CitationTest::test_monographic_person_authors", "tests/test_document.py::CitationTest::test_pending_deprecation_warning_of_analytic_authors", "tests/test_document.py::CitationTest::test_pending_deprecation_warning_of_monographic_authors", "tests/test_document.py::CitationTest::test_without_analytic_person_authors", "tests/test_document.py::CitationTest::test_without_analytic_person_authors_but_not_a_book_citation", "tests/test_document.py::CitationTest::test_without_monographic_person_authors", "tests/test_document.py::CitationTest::test_without_monographic_person_authors_but_not_a_book_citation" ]
[]
[ "tests/test_document.py::ToolsTests::test_creative_commons_html_1", "tests/test_document.py::ToolsTests::test_creative_commons_html_2", "tests/test_document.py::ToolsTests::test_creative_commons_html_3", "tests/test_document.py::ToolsTests::test_creative_commons_html_4", "tests/test_document.py::ToolsTests::test_creative_commons_html_5", "tests/test_document.py::ToolsTests::test_creative_commons_html_6", "tests/test_document.py::ToolsTests::test_creative_commons_text_1", "tests/test_document.py::ToolsTests::test_creative_commons_text_2", "tests/test_document.py::ToolsTests::test_creative_commons_text_3", "tests/test_document.py::ToolsTests::test_creative_commons_text_4", "tests/test_document.py::ToolsTests::test_creative_commons_text_5", "tests/test_document.py::ToolsTests::test_creative_commons_text_6", "tests/test_document.py::ToolsTests::test_creative_commons_text_7", "tests/test_document.py::ToolsTests::test_creative_commons_text_8", "tests/test_document.py::ToolsTests::test_get_date_wrong_day", "tests/test_document.py::ToolsTests::test_get_date_wrong_day_month", "tests/test_document.py::ToolsTests::test_get_date_wrong_day_month_not_int", "tests/test_document.py::ToolsTests::test_get_date_wrong_day_not_int", "tests/test_document.py::ToolsTests::test_get_date_wrong_month_not_int", "tests/test_document.py::ToolsTests::test_get_date_year", "tests/test_document.py::ToolsTests::test_get_date_year_day", "tests/test_document.py::ToolsTests::test_get_date_year_month", "tests/test_document.py::ToolsTests::test_get_date_year_month_day", "tests/test_document.py::ToolsTests::test_get_date_year_month_day_31", "tests/test_document.py::ToolsTests::test_get_language_iso639_1_defined", "tests/test_document.py::ToolsTests::test_get_language_iso639_1_undefined", "tests/test_document.py::ToolsTests::test_get_language_iso639_2_defined", "tests/test_document.py::ToolsTests::test_get_language_iso639_2_undefined", 
"tests/test_document.py::ToolsTests::test_get_language_without_iso_format", "tests/test_document.py::IssueTests::test_assets_code_month", "tests/test_document.py::IssueTests::test_collection_acronym", "tests/test_document.py::IssueTests::test_creation_date", "tests/test_document.py::IssueTests::test_creation_date_1", "tests/test_document.py::IssueTests::test_creation_date_2", "tests/test_document.py::IssueTests::test_ctrl_vocabulary", "tests/test_document.py::IssueTests::test_ctrl_vocabulary_out_of_choices", "tests/test_document.py::IssueTests::test_is_ahead", "tests/test_document.py::IssueTests::test_is_ahead_1", "tests/test_document.py::IssueTests::test_is_marked_up", "tests/test_document.py::IssueTests::test_is_press_release_false_1", "tests/test_document.py::IssueTests::test_is_press_release_false_2", "tests/test_document.py::IssueTests::test_is_press_release_true", "tests/test_document.py::IssueTests::test_issue", "tests/test_document.py::IssueTests::test_issue_journal_without_journal_metadata", "tests/test_document.py::IssueTests::test_issue_label", "tests/test_document.py::IssueTests::test_issue_url", "tests/test_document.py::IssueTests::test_order", "tests/test_document.py::IssueTests::test_permission_from_journal", "tests/test_document.py::IssueTests::test_permission_id", "tests/test_document.py::IssueTests::test_permission_t0", "tests/test_document.py::IssueTests::test_permission_t1", "tests/test_document.py::IssueTests::test_permission_t2", "tests/test_document.py::IssueTests::test_permission_t3", "tests/test_document.py::IssueTests::test_permission_t4", "tests/test_document.py::IssueTests::test_permission_text", "tests/test_document.py::IssueTests::test_permission_url", "tests/test_document.py::IssueTests::test_permission_without_v540", "tests/test_document.py::IssueTests::test_permission_without_v540_t", "tests/test_document.py::IssueTests::test_processing_date", "tests/test_document.py::IssueTests::test_processing_date_1", 
"tests/test_document.py::IssueTests::test_publication_date", "tests/test_document.py::IssueTests::test_sections", "tests/test_document.py::IssueTests::test_standard", "tests/test_document.py::IssueTests::test_standard_out_of_choices", "tests/test_document.py::IssueTests::test_start_end_month", "tests/test_document.py::IssueTests::test_start_end_month_1", "tests/test_document.py::IssueTests::test_start_end_month_2", "tests/test_document.py::IssueTests::test_start_end_month_3", "tests/test_document.py::IssueTests::test_start_end_month_4", "tests/test_document.py::IssueTests::test_start_end_month_5", "tests/test_document.py::IssueTests::test_start_end_month_6", "tests/test_document.py::IssueTests::test_supplement_number", "tests/test_document.py::IssueTests::test_supplement_volume", "tests/test_document.py::IssueTests::test_title_titles", "tests/test_document.py::IssueTests::test_title_titles_1", "tests/test_document.py::IssueTests::test_title_without_titles", "tests/test_document.py::IssueTests::test_total_documents", "tests/test_document.py::IssueTests::test_total_documents_without_data", "tests/test_document.py::IssueTests::test_type_pressrelease", "tests/test_document.py::IssueTests::test_type_regular", "tests/test_document.py::IssueTests::test_type_supplement_1", "tests/test_document.py::IssueTests::test_type_supplement_2", "tests/test_document.py::IssueTests::test_update_date", "tests/test_document.py::IssueTests::test_update_date_1", "tests/test_document.py::IssueTests::test_update_date_2", "tests/test_document.py::IssueTests::test_update_date_3", "tests/test_document.py::IssueTests::test_volume", "tests/test_document.py::IssueTests::test_without_ctrl_vocabulary", "tests/test_document.py::IssueTests::test_without_ctrl_vocabulary_also_in_journal", "tests/test_document.py::IssueTests::test_without_issue", "tests/test_document.py::IssueTests::test_without_processing_date", "tests/test_document.py::IssueTests::test_without_publication_date", 
"tests/test_document.py::IssueTests::test_without_standard", "tests/test_document.py::IssueTests::test_without_standard_also_in_journal", "tests/test_document.py::IssueTests::test_without_suplement_number", "tests/test_document.py::IssueTests::test_without_supplement_volume", "tests/test_document.py::IssueTests::test_without_volume", "tests/test_document.py::JournalTests::test_abstract_languages", "tests/test_document.py::JournalTests::test_abstract_languages_without_v350", "tests/test_document.py::JournalTests::test_any_issn_priority_electronic", "tests/test_document.py::JournalTests::test_any_issn_priority_electronic_without_electronic", "tests/test_document.py::JournalTests::test_any_issn_priority_print", "tests/test_document.py::JournalTests::test_any_issn_priority_print_without_print", "tests/test_document.py::JournalTests::test_cnn_code", "tests/test_document.py::JournalTests::test_collection_acronym", "tests/test_document.py::JournalTests::test_creation_date", "tests/test_document.py::JournalTests::test_ctrl_vocabulary", "tests/test_document.py::JournalTests::test_ctrl_vocabulary_out_of_choices", "tests/test_document.py::JournalTests::test_current_status", "tests/test_document.py::JournalTests::test_current_status_lots_of_changes_study_case_1", "tests/test_document.py::JournalTests::test_current_status_some_changes", "tests/test_document.py::JournalTests::test_current_without_v51", "tests/test_document.py::JournalTests::test_editor_address", "tests/test_document.py::JournalTests::test_editor_address_without_data", "tests/test_document.py::JournalTests::test_editor_email", "tests/test_document.py::JournalTests::test_editor_email_without_data", "tests/test_document.py::JournalTests::test_first_number", "tests/test_document.py::JournalTests::test_first_number_1", "tests/test_document.py::JournalTests::test_first_volume", "tests/test_document.py::JournalTests::test_first_volume_1", "tests/test_document.py::JournalTests::test_first_year", 
"tests/test_document.py::JournalTests::test_first_year_1", "tests/test_document.py::JournalTests::test_first_year_2", "tests/test_document.py::JournalTests::test_first_year_3", "tests/test_document.py::JournalTests::test_first_year_4", "tests/test_document.py::JournalTests::test_in_ahci", "tests/test_document.py::JournalTests::test_in_scie", "tests/test_document.py::JournalTests::test_in_ssci", "tests/test_document.py::JournalTests::test_institutional_url", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous_false_with_field_regular", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous_false_with_field_undefined", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous_false_without_field", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous_true", "tests/test_document.py::JournalTests::test_is_publishing_model_regular_1", "tests/test_document.py::JournalTests::test_is_publishing_model_regular_2", "tests/test_document.py::JournalTests::test_journal", "tests/test_document.py::JournalTests::test_journal_abbreviated_title", "tests/test_document.py::JournalTests::test_journal_acronym", "tests/test_document.py::JournalTests::test_journal_copyrighter", "tests/test_document.py::JournalTests::test_journal_copyrighter_without_copyright", "tests/test_document.py::JournalTests::test_journal_fulltitle", "tests/test_document.py::JournalTests::test_journal_fulltitle_without_subtitle", "tests/test_document.py::JournalTests::test_journal_fulltitle_without_title", "tests/test_document.py::JournalTests::test_journal_mission", "tests/test_document.py::JournalTests::test_journal_mission_without_language_key", "tests/test_document.py::JournalTests::test_journal_mission_without_mission", "tests/test_document.py::JournalTests::test_journal_mission_without_mission_text", 
"tests/test_document.py::JournalTests::test_journal_mission_without_mission_text_and_language", "tests/test_document.py::JournalTests::test_journal_other_title_without_other_titles", "tests/test_document.py::JournalTests::test_journal_other_titles", "tests/test_document.py::JournalTests::test_journal_publisher_country", "tests/test_document.py::JournalTests::test_journal_publisher_country_not_findable_code", "tests/test_document.py::JournalTests::test_journal_publisher_country_without_country", "tests/test_document.py::JournalTests::test_journal_sponsors", "tests/test_document.py::JournalTests::test_journal_sponsors_with_empty_items", "tests/test_document.py::JournalTests::test_journal_sponsors_without_sponsors", "tests/test_document.py::JournalTests::test_journal_subtitle", "tests/test_document.py::JournalTests::test_journal_title", "tests/test_document.py::JournalTests::test_journal_title_nlm", "tests/test_document.py::JournalTests::test_journal_url", "tests/test_document.py::JournalTests::test_journal_without_subtitle", "tests/test_document.py::JournalTests::test_languages", "tests/test_document.py::JournalTests::test_languages_without_v350", "tests/test_document.py::JournalTests::test_last_cnn_code_1", "tests/test_document.py::JournalTests::test_last_number", "tests/test_document.py::JournalTests::test_last_number_1", "tests/test_document.py::JournalTests::test_last_volume", "tests/test_document.py::JournalTests::test_last_volume_1", "tests/test_document.py::JournalTests::test_last_year", "tests/test_document.py::JournalTests::test_last_year_1", "tests/test_document.py::JournalTests::test_last_year_2", "tests/test_document.py::JournalTests::test_last_year_3", "tests/test_document.py::JournalTests::test_last_year_4", "tests/test_document.py::JournalTests::test_load_issn_with_v435", "tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_ONLINE", "tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_PRINT", 
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_ONLINE", "tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_PRINT", "tests/test_document.py::JournalTests::test_load_issn_with_v935_without_v35", "tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_ONLINE", "tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_PRINT", "tests/test_document.py::JournalTests::test_load_issn_without_v935_without_v35", "tests/test_document.py::JournalTests::test_periodicity", "tests/test_document.py::JournalTests::test_periodicity_in_months", "tests/test_document.py::JournalTests::test_periodicity_in_months_out_of_choices", "tests/test_document.py::JournalTests::test_periodicity_out_of_choices", "tests/test_document.py::JournalTests::test_permission_id", "tests/test_document.py::JournalTests::test_permission_t0", "tests/test_document.py::JournalTests::test_permission_t1", "tests/test_document.py::JournalTests::test_permission_t2", "tests/test_document.py::JournalTests::test_permission_t3", "tests/test_document.py::JournalTests::test_permission_t4", "tests/test_document.py::JournalTests::test_permission_text", "tests/test_document.py::JournalTests::test_permission_url", "tests/test_document.py::JournalTests::test_permission_without_v540", "tests/test_document.py::JournalTests::test_permission_without_v540_t", "tests/test_document.py::JournalTests::test_plevel", "tests/test_document.py::JournalTests::test_plevel_out_of_choices", "tests/test_document.py::JournalTests::test_previous_title", "tests/test_document.py::JournalTests::test_previous_title_without_data", "tests/test_document.py::JournalTests::test_publisher_city", "tests/test_document.py::JournalTests::test_publisher_loc", "tests/test_document.py::JournalTests::test_publisher_name", "tests/test_document.py::JournalTests::test_publisher_state", "tests/test_document.py::JournalTests::test_scielo_issn", 
"tests/test_document.py::JournalTests::test_secs_code", "tests/test_document.py::JournalTests::test_standard", "tests/test_document.py::JournalTests::test_standard_out_of_choices", "tests/test_document.py::JournalTests::test_status", "tests/test_document.py::JournalTests::test_status_lots_of_changes", "tests/test_document.py::JournalTests::test_status_lots_of_changes_study_case_1", "tests/test_document.py::JournalTests::test_status_lots_of_changes_with_reason", "tests/test_document.py::JournalTests::test_status_some_changes", "tests/test_document.py::JournalTests::test_status_without_v51", "tests/test_document.py::JournalTests::test_subject_areas", "tests/test_document.py::JournalTests::test_subject_descriptors", "tests/test_document.py::JournalTests::test_subject_index_coverage", "tests/test_document.py::JournalTests::test_submission_url", "tests/test_document.py::JournalTests::test_update_date", "tests/test_document.py::JournalTests::test_without_ctrl_vocabulary", "tests/test_document.py::JournalTests::test_without_index_coverage", "tests/test_document.py::JournalTests::test_without_institutional_url", "tests/test_document.py::JournalTests::test_without_journal_abbreviated_title", "tests/test_document.py::JournalTests::test_without_journal_acronym", "tests/test_document.py::JournalTests::test_without_journal_title", "tests/test_document.py::JournalTests::test_without_journal_title_nlm", "tests/test_document.py::JournalTests::test_without_journal_url", "tests/test_document.py::JournalTests::test_without_periodicity", "tests/test_document.py::JournalTests::test_without_periodicity_in_months", "tests/test_document.py::JournalTests::test_without_plevel", "tests/test_document.py::JournalTests::test_without_publisher_city", "tests/test_document.py::JournalTests::test_without_publisher_loc", "tests/test_document.py::JournalTests::test_without_publisher_name", "tests/test_document.py::JournalTests::test_without_publisher_state", 
"tests/test_document.py::JournalTests::test_without_scielo_domain", "tests/test_document.py::JournalTests::test_without_scielo_domain_title_v690", "tests/test_document.py::JournalTests::test_without_secs_code", "tests/test_document.py::JournalTests::test_without_standard", "tests/test_document.py::JournalTests::test_without_subject_areas", "tests/test_document.py::JournalTests::test_without_subject_descriptors", "tests/test_document.py::JournalTests::test_without_wos_citation_indexes", "tests/test_document.py::JournalTests::test_without_wos_subject_areas", "tests/test_document.py::JournalTests::test_wos_citation_indexes", "tests/test_document.py::JournalTests::test_wos_subject_areas", "tests/test_document.py::ArticleTests::test_abstracts", "tests/test_document.py::ArticleTests::test_abstracts_iso639_2", "tests/test_document.py::ArticleTests::test_abstracts_without_v83", "tests/test_document.py::ArticleTests::test_acceptance_date", "tests/test_document.py::ArticleTests::test_affiliation_just_with_affiliation_name", "tests/test_document.py::ArticleTests::test_affiliation_with_country_iso_3166", "tests/test_document.py::ArticleTests::test_affiliation_without_affiliation_name", "tests/test_document.py::ArticleTests::test_affiliations", "tests/test_document.py::ArticleTests::test_ahead_publication_date", "tests/test_document.py::ArticleTests::test_article", "tests/test_document.py::ArticleTests::test_author_with_two_affiliations", "tests/test_document.py::ArticleTests::test_author_with_two_role", "tests/test_document.py::ArticleTests::test_author_without_affiliations", "tests/test_document.py::ArticleTests::test_author_without_surname_and_given_names", "tests/test_document.py::ArticleTests::test_authors", "tests/test_document.py::ArticleTests::test_collection_acronym", "tests/test_document.py::ArticleTests::test_collection_acronym_priorizing_collection", "tests/test_document.py::ArticleTests::test_collection_acronym_retrieving_v992", 
"tests/test_document.py::ArticleTests::test_collection_name_brazil", "tests/test_document.py::ArticleTests::test_collection_name_undefined", "tests/test_document.py::ArticleTests::test_corporative_authors", "tests/test_document.py::ArticleTests::test_creation_date", "tests/test_document.py::ArticleTests::test_creation_date_1", "tests/test_document.py::ArticleTests::test_creation_date_2", "tests/test_document.py::ArticleTests::test_data_model_version_html", "tests/test_document.py::ArticleTests::test_data_model_version_html_1", "tests/test_document.py::ArticleTests::test_data_model_version_xml", "tests/test_document.py::ArticleTests::test_document_type", "tests/test_document.py::ArticleTests::test_document_without_issue_metadata", "tests/test_document.py::ArticleTests::test_document_without_journal_metadata", "tests/test_document.py::ArticleTests::test_doi", "tests/test_document.py::ArticleTests::test_doi_clean_1", "tests/test_document.py::ArticleTests::test_doi_clean_2", "tests/test_document.py::ArticleTests::test_doi_v237", "tests/test_document.py::ArticleTests::test_e_location", "tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_1", "tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_2", "tests/test_document.py::ArticleTests::test_end_page_loaded_through_xml", "tests/test_document.py::ArticleTests::test_file_code", "tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_1", "tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_2", "tests/test_document.py::ArticleTests::test_first_author", "tests/test_document.py::ArticleTests::test_first_author_without_author", "tests/test_document.py::ArticleTests::test_fulltexts_field_fulltexts", "tests/test_document.py::ArticleTests::test_fulltexts_without_field_fulltexts", "tests/test_document.py::ArticleTests::test_html_url", "tests/test_document.py::ArticleTests::test_invalid_document_type", "tests/test_document.py::ArticleTests::test_issue_url", 
"tests/test_document.py::ArticleTests::test_journal_abbreviated_title", "tests/test_document.py::ArticleTests::test_keywords", "tests/test_document.py::ArticleTests::test_keywords_iso639_2", "tests/test_document.py::ArticleTests::test_keywords_with_undefined_language", "tests/test_document.py::ArticleTests::test_keywords_without_subfield_k", "tests/test_document.py::ArticleTests::test_keywords_without_subfield_l", "tests/test_document.py::ArticleTests::test_languages_field_fulltexts", "tests/test_document.py::ArticleTests::test_languages_field_v40", "tests/test_document.py::ArticleTests::test_last_page", "tests/test_document.py::ArticleTests::test_mixed_affiliations_1", "tests/test_document.py::ArticleTests::test_normalized_affiliations", "tests/test_document.py::ArticleTests::test_normalized_affiliations_undefined_ISO_3166_CODE", "tests/test_document.py::ArticleTests::test_normalized_affiliations_without_p", "tests/test_document.py::ArticleTests::test_order", "tests/test_document.py::ArticleTests::test_original_abstract_with_just_one_language_defined", "tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined", "tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined_but_different_of_the_article_original_language", "tests/test_document.py::ArticleTests::test_original_abstract_without_language_defined", "tests/test_document.py::ArticleTests::test_original_html_field_body", "tests/test_document.py::ArticleTests::test_original_language_invalid_iso639_2", "tests/test_document.py::ArticleTests::test_original_language_iso639_2", "tests/test_document.py::ArticleTests::test_original_language_original", "tests/test_document.py::ArticleTests::test_original_section_field_v49", "tests/test_document.py::ArticleTests::test_original_title_subfield_t", "tests/test_document.py::ArticleTests::test_original_title_with_just_one_language_defined", "tests/test_document.py::ArticleTests::test_original_title_with_language_defined", 
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined_but_different_of_the_article_original_language", "tests/test_document.py::ArticleTests::test_original_title_without_language_defined", "tests/test_document.py::ArticleTests::test_pdf_url", "tests/test_document.py::ArticleTests::test_processing_date", "tests/test_document.py::ArticleTests::test_processing_date_1", "tests/test_document.py::ArticleTests::test_project_name", "tests/test_document.py::ArticleTests::test_project_sponsors", "tests/test_document.py::ArticleTests::test_publication_contract", "tests/test_document.py::ArticleTests::test_publication_date_with_article_date", "tests/test_document.py::ArticleTests::test_publication_date_without_article_date", "tests/test_document.py::ArticleTests::test_publisher_ahead_id", "tests/test_document.py::ArticleTests::test_publisher_ahead_id_none", "tests/test_document.py::ArticleTests::test_publisher_id", "tests/test_document.py::ArticleTests::test_receive_date", "tests/test_document.py::ArticleTests::test_review_date", "tests/test_document.py::ArticleTests::test_section_code_field_v49", "tests/test_document.py::ArticleTests::test_section_code_nd_field_v49", "tests/test_document.py::ArticleTests::test_section_code_without_field_v49", "tests/test_document.py::ArticleTests::test_section_field_v49", "tests/test_document.py::ArticleTests::test_section_nd_field_v49", "tests/test_document.py::ArticleTests::test_section_without_field_section", "tests/test_document.py::ArticleTests::test_section_without_field_v49", "tests/test_document.py::ArticleTests::test_start_page", "tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_1", "tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_2", "tests/test_document.py::ArticleTests::test_start_page_loaded_through_xml", "tests/test_document.py::ArticleTests::test_start_page_sec", "tests/test_document.py::ArticleTests::test_start_page_sec_0", 
"tests/test_document.py::ArticleTests::test_start_page_sec_0_loaded_through_xml", "tests/test_document.py::ArticleTests::test_start_page_sec_loaded_through_xml", "tests/test_document.py::ArticleTests::test_subject_areas", "tests/test_document.py::ArticleTests::test_thesis_degree", "tests/test_document.py::ArticleTests::test_thesis_organization", "tests/test_document.py::ArticleTests::test_thesis_organization_and_division", "tests/test_document.py::ArticleTests::test_thesis_organization_without_name", "tests/test_document.py::ArticleTests::test_translated_abstracts", "tests/test_document.py::ArticleTests::test_translated_abstracts_without_v83", "tests/test_document.py::ArticleTests::test_translated_abtracts_iso639_2", "tests/test_document.py::ArticleTests::test_translated_htmls_field_body", "tests/test_document.py::ArticleTests::test_translated_section_field_v49", "tests/test_document.py::ArticleTests::test_translated_titles", "tests/test_document.py::ArticleTests::test_translated_titles_iso639_2", "tests/test_document.py::ArticleTests::test_translated_titles_without_v12", "tests/test_document.py::ArticleTests::test_update_date", "tests/test_document.py::ArticleTests::test_update_date_1", "tests/test_document.py::ArticleTests::test_update_date_2", "tests/test_document.py::ArticleTests::test_update_date_3", "tests/test_document.py::ArticleTests::test_whitwout_acceptance_date", "tests/test_document.py::ArticleTests::test_whitwout_ahead_publication_date", "tests/test_document.py::ArticleTests::test_whitwout_receive_date", "tests/test_document.py::ArticleTests::test_whitwout_review_date", "tests/test_document.py::ArticleTests::test_without_affiliations", "tests/test_document.py::ArticleTests::test_without_authors", "tests/test_document.py::ArticleTests::test_without_citations", "tests/test_document.py::ArticleTests::test_without_collection_acronym", "tests/test_document.py::ArticleTests::test_without_corporative_authors", 
"tests/test_document.py::ArticleTests::test_without_document_type", "tests/test_document.py::ArticleTests::test_without_doi", "tests/test_document.py::ArticleTests::test_without_e_location", "tests/test_document.py::ArticleTests::test_without_html_url", "tests/test_document.py::ArticleTests::test_without_issue_url", "tests/test_document.py::ArticleTests::test_without_journal_abbreviated_title", "tests/test_document.py::ArticleTests::test_without_keywords", "tests/test_document.py::ArticleTests::test_without_last_page", "tests/test_document.py::ArticleTests::test_without_normalized_affiliations", "tests/test_document.py::ArticleTests::test_without_order", "tests/test_document.py::ArticleTests::test_without_original_abstract", "tests/test_document.py::ArticleTests::test_without_original_title", "tests/test_document.py::ArticleTests::test_without_pages", "tests/test_document.py::ArticleTests::test_without_pdf_url", "tests/test_document.py::ArticleTests::test_without_processing_date", "tests/test_document.py::ArticleTests::test_without_project_name", "tests/test_document.py::ArticleTests::test_without_project_sponsor", "tests/test_document.py::ArticleTests::test_without_publication_contract", "tests/test_document.py::ArticleTests::test_without_publication_date", "tests/test_document.py::ArticleTests::test_without_publisher_id", "tests/test_document.py::ArticleTests::test_without_scielo_domain", "tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69", "tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69_and_with_title_v690", "tests/test_document.py::ArticleTests::test_without_scielo_domain_title_v690", "tests/test_document.py::ArticleTests::test_without_start_page", "tests/test_document.py::ArticleTests::test_without_subject_areas", "tests/test_document.py::ArticleTests::test_without_thesis_degree", "tests/test_document.py::ArticleTests::test_without_thesis_organization", 
"tests/test_document.py::ArticleTests::test_without_wos_citation_indexes", "tests/test_document.py::ArticleTests::test_without_wos_subject_areas", "tests/test_document.py::ArticleTests::test_wos_citation_indexes", "tests/test_document.py::ArticleTests::test_wos_subject_areas", "tests/test_document.py::CitationTest::test_a_link_access_date", "tests/test_document.py::CitationTest::test_a_link_access_date_absent_v65", "tests/test_document.py::CitationTest::test_analytic_institution_for_a_article_citation", "tests/test_document.py::CitationTest::test_analytic_institution_for_a_book_citation", "tests/test_document.py::CitationTest::test_article_title", "tests/test_document.py::CitationTest::test_article_without_title", "tests/test_document.py::CitationTest::test_authors_article", "tests/test_document.py::CitationTest::test_authors_book", "tests/test_document.py::CitationTest::test_authors_link", "tests/test_document.py::CitationTest::test_authors_thesis", "tests/test_document.py::CitationTest::test_book_chapter_title", "tests/test_document.py::CitationTest::test_book_edition", "tests/test_document.py::CitationTest::test_book_volume", "tests/test_document.py::CitationTest::test_book_without_chapter_title", "tests/test_document.py::CitationTest::test_citation_sample_congress", "tests/test_document.py::CitationTest::test_citation_sample_link", "tests/test_document.py::CitationTest::test_citation_sample_link_without_comment", "tests/test_document.py::CitationTest::test_conference_edition", "tests/test_document.py::CitationTest::test_conference_name", "tests/test_document.py::CitationTest::test_conference_sponsor", "tests/test_document.py::CitationTest::test_conference_without_name", "tests/test_document.py::CitationTest::test_conference_without_sponsor", "tests/test_document.py::CitationTest::test_date", "tests/test_document.py::CitationTest::test_doi", "tests/test_document.py::CitationTest::test_editor", "tests/test_document.py::CitationTest::test_elocation_14", 
"tests/test_document.py::CitationTest::test_elocation_514", "tests/test_document.py::CitationTest::test_end_page_14", "tests/test_document.py::CitationTest::test_end_page_514", "tests/test_document.py::CitationTest::test_end_page_withdout_data", "tests/test_document.py::CitationTest::test_first_author_article", "tests/test_document.py::CitationTest::test_first_author_book", "tests/test_document.py::CitationTest::test_first_author_link", "tests/test_document.py::CitationTest::test_first_author_thesis", "tests/test_document.py::CitationTest::test_first_author_without_monographic_authors", "tests/test_document.py::CitationTest::test_first_author_without_monographic_authors_but_not_a_book_citation", "tests/test_document.py::CitationTest::test_index_number", "tests/test_document.py::CitationTest::test_institutions_all_fields", "tests/test_document.py::CitationTest::test_institutions_v11", "tests/test_document.py::CitationTest::test_institutions_v17", "tests/test_document.py::CitationTest::test_institutions_v29", "tests/test_document.py::CitationTest::test_institutions_v50", "tests/test_document.py::CitationTest::test_institutions_v58", "tests/test_document.py::CitationTest::test_invalid_edition", "tests/test_document.py::CitationTest::test_isbn", "tests/test_document.py::CitationTest::test_isbn_but_not_a_book", "tests/test_document.py::CitationTest::test_issn", "tests/test_document.py::CitationTest::test_issn_but_not_an_article", "tests/test_document.py::CitationTest::test_issue_part", "tests/test_document.py::CitationTest::test_issue_title", "tests/test_document.py::CitationTest::test_journal_issue", "tests/test_document.py::CitationTest::test_journal_volume", "tests/test_document.py::CitationTest::test_link", "tests/test_document.py::CitationTest::test_link_title", "tests/test_document.py::CitationTest::test_link_without_title", "tests/test_document.py::CitationTest::test_mixed_citation_1", "tests/test_document.py::CitationTest::test_mixed_citation_10", 
"tests/test_document.py::CitationTest::test_mixed_citation_11", "tests/test_document.py::CitationTest::test_mixed_citation_12", "tests/test_document.py::CitationTest::test_mixed_citation_13", "tests/test_document.py::CitationTest::test_mixed_citation_14", "tests/test_document.py::CitationTest::test_mixed_citation_15", "tests/test_document.py::CitationTest::test_mixed_citation_16", "tests/test_document.py::CitationTest::test_mixed_citation_17", "tests/test_document.py::CitationTest::test_mixed_citation_18", "tests/test_document.py::CitationTest::test_mixed_citation_19", "tests/test_document.py::CitationTest::test_mixed_citation_2", "tests/test_document.py::CitationTest::test_mixed_citation_3", "tests/test_document.py::CitationTest::test_mixed_citation_4", "tests/test_document.py::CitationTest::test_mixed_citation_5", "tests/test_document.py::CitationTest::test_mixed_citation_6", "tests/test_document.py::CitationTest::test_mixed_citation_7", "tests/test_document.py::CitationTest::test_mixed_citation_8", "tests/test_document.py::CitationTest::test_mixed_citation_9", "tests/test_document.py::CitationTest::test_mixed_citation_without_data", "tests/test_document.py::CitationTest::test_monographic_authors", "tests/test_document.py::CitationTest::test_monographic_first_author", "tests/test_document.py::CitationTest::test_pages_14", "tests/test_document.py::CitationTest::test_pages_514", "tests/test_document.py::CitationTest::test_pages_withdout_data", "tests/test_document.py::CitationTest::test_publication_type_article", "tests/test_document.py::CitationTest::test_publication_type_book", "tests/test_document.py::CitationTest::test_publication_type_book_chapter", "tests/test_document.py::CitationTest::test_publication_type_conference", "tests/test_document.py::CitationTest::test_publication_type_link", "tests/test_document.py::CitationTest::test_publication_type_thesis", "tests/test_document.py::CitationTest::test_publication_type_undefined", 
"tests/test_document.py::CitationTest::test_publisher", "tests/test_document.py::CitationTest::test_publisher_address", "tests/test_document.py::CitationTest::test_publisher_address_without_e", "tests/test_document.py::CitationTest::test_series_book", "tests/test_document.py::CitationTest::test_series_but_neither_journal_book_or_conference_citation", "tests/test_document.py::CitationTest::test_series_conference", "tests/test_document.py::CitationTest::test_series_journal", "tests/test_document.py::CitationTest::test_source_book_title", "tests/test_document.py::CitationTest::test_source_journal", "tests/test_document.py::CitationTest::test_source_journal_without_journal_title", "tests/test_document.py::CitationTest::test_sponsor", "tests/test_document.py::CitationTest::test_start_page_14", "tests/test_document.py::CitationTest::test_start_page_514", "tests/test_document.py::CitationTest::test_start_page_withdout_data", "tests/test_document.py::CitationTest::test_thesis_institution", "tests/test_document.py::CitationTest::test_thesis_title", "tests/test_document.py::CitationTest::test_thesis_without_title", "tests/test_document.py::CitationTest::test_title_when_article_citation", "tests/test_document.py::CitationTest::test_title_when_conference_citation", "tests/test_document.py::CitationTest::test_title_when_link_citation", "tests/test_document.py::CitationTest::test_title_when_thesis_citation", "tests/test_document.py::CitationTest::test_with_volume_but_not_a_journal_article_neither_a_book", "tests/test_document.py::CitationTest::test_without_analytic_institution", "tests/test_document.py::CitationTest::test_without_authors", "tests/test_document.py::CitationTest::test_without_date", "tests/test_document.py::CitationTest::test_without_doi", "tests/test_document.py::CitationTest::test_without_edition", "tests/test_document.py::CitationTest::test_without_editor", "tests/test_document.py::CitationTest::test_without_first_author", 
"tests/test_document.py::CitationTest::test_without_index_number", "tests/test_document.py::CitationTest::test_without_institutions", "tests/test_document.py::CitationTest::test_without_issue", "tests/test_document.py::CitationTest::test_without_issue_part", "tests/test_document.py::CitationTest::test_without_issue_title", "tests/test_document.py::CitationTest::test_without_link", "tests/test_document.py::CitationTest::test_without_monographic_authors", "tests/test_document.py::CitationTest::test_without_monographic_authors_but_not_a_book_citation", "tests/test_document.py::CitationTest::test_without_publisher", "tests/test_document.py::CitationTest::test_without_publisher_address", "tests/test_document.py::CitationTest::test_without_series", "tests/test_document.py::CitationTest::test_without_sponsor", "tests/test_document.py::CitationTest::test_without_thesis_institution", "tests/test_document.py::CitationTest::test_without_volume" ]
[]
BSD 2-Clause "Simplified" License
2,859
[ "xylose/scielodocument.py" ]
[ "xylose/scielodocument.py" ]
pennmem__cmlreaders-170
355bb312d51b4429738ea491b7cfea4d2fec490c
2018-08-02 18:12:22
355bb312d51b4429738ea491b7cfea4d2fec490c
diff --git a/cmlreaders/cmlreader.py b/cmlreaders/cmlreader.py index ecd5506..b450da2 100644 --- a/cmlreaders/cmlreader.py +++ b/cmlreaders/cmlreader.py @@ -113,6 +113,15 @@ class CMLReader(object): setattr(self, "_" + which, value) return value + @staticmethod + def get_data_index(protocol: str = "all", + rootdir: Optional[str] = None) -> pd.DataFrame: + """Shortcut for the global :func:`get_data_index` function to only + need to import :class:`CMLReader`. + + """ + return get_data_index(protocol, rootdir) + @property def localization(self) -> int: """Determine the localization number."""
Make get_data_index a static method of CMLReader Ideally, the only entry point for `cmlreaders` should be the `CMLReader` class itself. Right now, to get the data index, one also needs to import the `get_data_index` function. This could be simplified by making this a classmethod: ```python >>> from cmlreaders import CMLReader >>> df = CMLReader.get_data_index("r1") ``` or maybe for less typing simply ```python df = CMLReader.index("r1") ```
pennmem/cmlreaders
diff --git a/cmlreaders/test/test_cmlreader.py b/cmlreaders/test/test_cmlreader.py index 05066bd..1ba646e 100644 --- a/cmlreaders/test/test_cmlreader.py +++ b/cmlreaders/test/test_cmlreader.py @@ -7,13 +7,25 @@ import pandas as pd from pkg_resources import resource_filename import pytest -from cmlreaders import CMLReader, exc +from cmlreaders import CMLReader, exc, get_data_index +from cmlreaders.path_finder import PathFinder from cmlreaders.test.utils import patched_cmlreader datafile = functools.partial(resource_filename, 'cmlreaders.test.data') class TestCMLReader: + @pytest.mark.parametrize("protocol", ["all", "r1"]) + def test_get_data_index(self, protocol): + if protocol is "all": + path = resource_filename("cmlreaders.test.data", "r1.json") + else: + path = resource_filename("cmlreaders.test.data", protocol + ".json") + + with patch.object(PathFinder, "find", return_value=path): + ix = CMLReader.get_data_index(protocol) + assert all(ix == get_data_index(protocol)) + @pytest.mark.parametrize("subject,experiment,session,localization,montage", [ ("R1278E", "catFR1", 0, 0, 1), ("R1278E", "catFR1", None, 0, 1),
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.8
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 cached-property==1.5.2 certifi==2021.5.30 charset-normalizer==2.0.12 -e git+https://github.com/pennmem/cmlreaders.git@355bb312d51b4429738ea491b7cfea4d2fec490c#egg=cmlreaders codecov==2.1.13 coverage==6.2 cycler==0.11.0 decorator==5.1.1 defusedxml==0.7.1 docutils==0.18.1 entrypoints==0.4 flake8==3.9.2 h5py==3.1.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 ipython-genutils==0.2.0 Jinja2==3.0.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mccabe==0.6.1 mistune==0.8.4 mne==0.23.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.8.8 nest-asyncio==1.6.0 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 Pillow==8.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.7.0 pyflakes==2.3.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 webencodings==0.5.1 zipp==3.6.0
name: cmlreaders channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cached-property==1.5.2 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - cycler==0.11.0 - decorator==5.1.1 - defusedxml==0.7.1 - docutils==0.18.1 - entrypoints==0.4 - flake8==3.9.2 - h5py==3.1.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - ipython-genutils==0.2.0 - jinja2==3.0.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mccabe==0.6.1 - mistune==0.8.4 - mne==0.23.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.8.8 - nest-asyncio==1.6.0 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - pillow==8.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.7.0 - pyflakes==2.3.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - 
sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cmlreaders
[ "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_data_index[all]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_data_index[r1]" ]
[ "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[voxel_coordinates-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[voxel_coordinates-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[classifier_excluded_leads-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[classifier_excluded_leads-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[jacksheet-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[jacksheet-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[good_leads-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[good_leads-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[leads-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[leads-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[electrode_coordinates-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[electrode_coordinates-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[prior_stim_results-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[prior_stim_results-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[target_selection_table-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[target_selection_table-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[electrode_categories-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[electrode_categories-LTP093-ltpFR2-0-None]", 
"cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[classifier_summary-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[classifier_summary-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[math_summary-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[math_summary-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[session_summary-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[session_summary-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[pairs-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[pairs-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[contacts-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[contacts-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[localization-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[localization-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[baseline_classifier-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[baseline_classifier-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[used_classifier-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[used_classifier-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[events-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[events-LTP093-ltpFR2-0-None]", 
"cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[all_events-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[all_events-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[task_events-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[task_events-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[math_events-R1405E-FR1-1-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_from_rhino[math_events-LTP093-ltpFR2-0-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[baseline_classifier.zip]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[used_classifier.zip]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[baseline_classifier]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[used_classifier]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_ps_events[R1354E-PS4_FR-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_ps_events[R1111M-PS2-0]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_ps_events[R1025P-PS1-0]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_rhino[True-contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_rhino[True-pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_rhino[False-contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_rhino[False-pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_missing[contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories_missing[pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[R1111M-FR1-4]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[subjects2-experiments2-5]", 
"cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[subjects3-None-22]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[subjects4-experiments4-6]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[None-experiments5-79]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[R1289C-experiments6-3]" ]
[ "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-catFR1-0-0-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-catFR1-None-0-1]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-PAL1-None-2-2]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-PAL3-2-2-2]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-TH1-0-0-0]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[R1278E-TH1-None-0-0]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_determine_localization_or_montage[LTP093-ltpFR2-0-None-None]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[voxel_coordinates.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[classifier_excluded_leads.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[jacksheet.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[good_leads.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[leads.txt]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[electrode_coordinates.csv]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[prior_stim_results.csv]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[target_selection_table.csv]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[pairs.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[contacts.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[localization.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[all_events.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[math_events.json]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load[task_events.json]", 
"cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[voxel_coordinates]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[classifier_excluded_leads]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[jacksheet]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[good_leads]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[leads]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[electrode_coordinates]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[prior_stim_results]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[target_selection_table]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[pairs]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[contacts]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[localization]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[all_events]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[math_events]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_get_reader[task_events]", "cmlreaders/test/test_cmlreader.py::TestCMLReader::test_load_unimplemented", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories[True-contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories[True-pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories[False-contacts]", "cmlreaders/test/test_cmlreader.py::TestLoadMontage::test_read_categories[False-pairs]", "cmlreaders/test/test_cmlreader.py::TestLoadAggregate::test_load_events[None-None-None]" ]
[]
null
2,860
[ "cmlreaders/cmlreader.py" ]
[ "cmlreaders/cmlreader.py" ]
delph-in__pydelphin-162
83108aee8c1db2b4293f118616301d092a33f4df
2018-08-02 21:07:32
49868c546cec5d4e65064bbae009f71156e14f3c
diff --git a/CHANGELOG.md b/CHANGELOG.md index c622845..d728f60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,14 @@ * Converting to PENMAN via the `convert` command should no longer crash for disconnected graphs, but print a log message to stderr, print a blank line to stdout, and then continue (#161) +* Updated the docstrings for `delphin.mrs.xmrs.Xmrs.args()`, + `delphin.mrs.xmrs.Xmrs.outgoing_args()`, and `delphin.mrs.xmrs.Xmrs.incoming_args()`, + from "DMRS-style undirected links" to "MOD/EQ links" and updated the Return + value of `Xmrs.args()` and `Xmrs.outgoing_args` from `{nodeid: {}}` to + `{role: tgt}`(#133) +* `delphin.mrs.compare.isomorphic()` compares predicates using a normalized form +* Updated the code and the docstrings for references to 'string' and 'grammar' + predicates to refer to 'surface' and 'abstract' predicates (#117) ### Deprecated diff --git a/delphin/mrs/compare.py b/delphin/mrs/compare.py index edf55ac..268773a 100644 --- a/delphin/mrs/compare.py +++ b/delphin/mrs/compare.py @@ -43,9 +43,9 @@ def _make_digraph(x, check_varprops): for ep in x.eps(): nid, pred, args = ep[0], ep[1], ep[3] if CONSTARG_ROLE in args: - s = '{}({})'.format(pred.string, args[CONSTARG_ROLE]) + s = '{}({})'.format(pred.short_form(), args[CONSTARG_ROLE]) else: - s = pred.string + s = pred.short_form() dg.add_node(nid, sig=s) dg.add_edges_from((nid, var_id(val)) for role, val in args.items() if role != CONSTARG_ROLE) @@ -450,7 +450,7 @@ def _isomorphic_var_signature(vd, xmrs, check_varprops): else: for nid in refval: pred = xmrs.pred(nid) - sig.append('%s:%s' % (pred.string, role)) + sig.append('%s:%s' % (pred.short_form(), role)) return ' '.join(sorted(sig)) diff --git a/delphin/mrs/components.py b/delphin/mrs/components.py index 31deaae..deb4275 100644 --- a/delphin/mrs/components.py +++ b/delphin/mrs/components.py @@ -10,6 +10,7 @@ from collections import namedtuple, MutableMapping from itertools import starmap from delphin.exceptions import 
(XmrsError, XmrsStructureError) +from delphin.util import deprecated from .config import ( IVARG_ROLE, CONSTARG_ROLE, RSTR_ROLE, UNKNOWNSORT, HANDLESORT, CVARSORT, QUANTIFIER_POS, @@ -446,8 +447,8 @@ class Pred(namedtuple('Pred', ('type', 'lemma', 'pos', 'sense', 'string'))): In PyDelphin, Preds are equivalent if they have the same lemma, pos, and sense, and are both abstract or both surface preds. Other - factors are ignored for comparison, such as their being string-, - grammar-, or real-preds, whether they are quoted or not, whether + factors are ignored for comparison, such as their being surface-, + abstract-, or real-preds, whether they are quoted or not, whether they end with `_rel` or not, or differences in capitalization. Hashed Pred objects (e.g., in a dict or set) also use the normalized form. However, unlike with equality comparisons, @@ -455,7 +456,7 @@ class Pred(namedtuple('Pred', ('type', 'lemma', 'pos', 'sense', 'string'))): Args: type: the type of predicate; valid values are - Pred.GRAMMARPRED, Pred.REALPRED, and Pred.STRINGPRED, + Pred.ABSTRACT, Pred.REALPRED, and Pred.SURFACE, although in practice Preds are instantiated via classmethods that select the type lemma: the lemma of the predicate @@ -464,8 +465,8 @@ class Pred(namedtuple('Pred', ('type', 'lemma', 'pos', 'sense', 'string'))): Returns: a Pred object Attributes: - type: predicate type (Pred.GRAMMARPRED, Pred.REALPRED, or - Pred.STRINGPRED) + type: predicate type (Pred.ABSTRACT, Pred.REALPRED, + and Pred.SURFACE) lemma: lemma component of the predicate pos: part-of-speech component of the predicate sense: sense component of the predicate @@ -475,9 +476,9 @@ class Pred(namedtuple('Pred', ('type', 'lemma', 'pos', 'sense', 'string'))): capitalization doesn't matter. 
In addition, preds may be compared directly to their string representations: - >>> p1 = Pred.stringpred('_dog_n_1_rel') + >>> p1 = Pred.surface('_dog_n_1_rel') >>> p2 = Pred.realpred(lemma='dog', pos='n', sense='1') - >>> p3 = Pred.grammarpred('dog_n_1_rel') + >>> p3 = Pred.abstract('dog_n_1_rel') >>> p1 == p2 True >>> p1 == '_dog_n_1_rel' @@ -493,15 +494,15 @@ class Pred(namedtuple('Pred', ('type', 'lemma', 'pos', 'sense', 'string'))): re.IGNORECASE ) # Pred types (used mainly in input/output, not internally in pyDelphin) - GRAMMARPRED = 0 # only a string allowed (quoted or not) + ABSTRACT = GRAMMARPRED = 0 # only a string allowed (quoted or not) REALPRED = 1 # may explicitly define lemma, pos, sense - STRINGPRED = 2 # quoted string form of realpred + SURFACE = STRINGPRED = 2 # quoted string form of realpred def __eq__(self, other): if other is None: return False if not isinstance(other, Pred): - other = Pred.stringpred(other) + other = Pred.surface(other) return self.short_form().lower() == other.short_form().lower() def __str__ (self): @@ -514,24 +515,42 @@ class Pred(namedtuple('Pred', ('type', 'lemma', 'pos', 'sense', 'string'))): return hash(self.short_form()) @classmethod - def stringpred(cls, predstr): + @deprecated(final_version='1.0.0', alternative='Pred.surface()') + def stringpred(cls, predstr): + """Instantiate a Pred from its quoted string representation.""" + return cls.surface(predstr) + + @classmethod + def surface(cls, predstr): """Instantiate a Pred from its quoted string representation.""" lemma, pos, sense, _ = split_pred_string(predstr) - return cls(Pred.STRINGPRED, lemma, pos, sense, predstr) + return cls(Pred.SURFACE, lemma, pos, sense, predstr) @classmethod + @deprecated(final_version='1.0.0', alternative='Pred.abstract()') def grammarpred(cls, predstr): + """Instantiate a Pred from its symbol string.""" + return cls.abstract(predstr) + + @classmethod + def abstract(cls, predstr): """Instantiate a Pred from its symbol string.""" lemma, pos, 
sense, _ = split_pred_string(predstr) - return cls(Pred.GRAMMARPRED, lemma, pos, sense, predstr) + return cls(Pred.ABSTRACT, lemma, pos, sense, predstr) + + @classmethod + @deprecated(final_version='1.0.0', alternative='Pred.surface_or_abstract()') + def string_or_grammar_pred(cls, predstr): + """Instantiate a Pred from either its surface or abstract symbol.""" + return cls.surface_or_abstract(predstr) - @staticmethod - def string_or_grammar_pred(predstr): - """Instantiate a Pred from either its string or grammar symbol.""" + @classmethod + def surface_or_abstract(cls, predstr): + """Instantiate a Pred from either its surface or abstract symbol.""" if predstr.strip('"').lstrip("'").startswith('_'): - return Pred.stringpred(predstr) + return cls.surface(predstr) else: - return Pred.grammarpred(predstr) + return cls.abstract(predstr) @classmethod def realpred(cls, lemma, pos, sense=None): @@ -554,7 +573,7 @@ class Pred(namedtuple('Pred', ('type', 'lemma', 'pos', 'sense', 'string'))): Example: - >>> p = Pred.stringpred('"_cat_n_1_rel"') + >>> p = Pred.surface('"_cat_n_1_rel"') >>> p.short_form() '_cat_n_1' """ diff --git a/delphin/mrs/dmrx.py b/delphin/mrs/dmrx.py index 86a79a4..18cb1ed 100644 --- a/delphin/mrs/dmrx.py +++ b/delphin/mrs/dmrx.py @@ -166,7 +166,7 @@ def _decode_pred(elem): # sense CDATA #IMPLIED > # <!ELEMENT gpred (#PCDATA)> if elem.tag == 'gpred': - return Pred.grammarpred(elem.text) + return Pred.abstract(elem.text) elif elem.tag == 'realpred': return Pred.realpred(elem.get('lemma'), elem.get('pos') or None, @@ -267,10 +267,10 @@ def _encode_node(node, properties): def _encode_pred(pred): - if pred.type == Pred.GRAMMARPRED: + if pred.type == Pred.ABSTRACT: e = etree.Element('gpred') e.text = pred.string.strip('"\'') - elif pred.type in (Pred.REALPRED, Pred.STRINGPRED): + elif pred.type in (Pred.REALPRED, Pred.SURFACE): attributes = {} attributes['lemma'] = pred.lemma if pred.pos is None: diff --git a/delphin/mrs/eds.py b/delphin/mrs/eds.py index 
280b5fa..f99799b 100644 --- a/delphin/mrs/eds.py +++ b/delphin/mrs/eds.py @@ -163,7 +163,7 @@ class Eds(object): """ Decode a dictionary, as from :meth:`to_dict`, into an Eds object. """ - makepred, charspan = Pred.string_or_grammar_pred, Lnk.charspan + makepred, charspan = Pred.surface_or_abstract, Lnk.charspan top = d.get('top') nodes, edges = [], [] for nid, node in d.get('nodes', {}).items(): @@ -231,7 +231,7 @@ class Eds(object): nids.append(src) nd[src] = {'pred': None, 'lnk': None, 'carg': None, 'si': []} if rel == 'predicate': - nd[src]['pred'] = Pred.string_or_grammar_pred(tgt) + nd[src]['pred'] = Pred.surface_or_abstract(tgt) elif rel == 'lnk': cfrom, cto = tgt.strip('"<>').split(':') nd[src]['lnk'] = Lnk.charspan(int(cfrom), int(cto)) @@ -501,7 +501,7 @@ _COMMA = regex(r',\s*') _SPACES = regex(r'\s+', value=Ignore) _SYMBOL = regex(r'[-+\w]+') _PRED = regex(r'((?!<-?\d|\("|\{|\[)\w)+', - value=Pred.string_or_grammar_pred) + value=Pred.surface_or_abstract) _EDS = nt('EDS', value=_make_eds) _TOP = opt(nt('TOP'), default=None) _TOPID = opt(_SYMBOL, default=None) diff --git a/delphin/mrs/mrx.py b/delphin/mrs/mrx.py index eadcbfd..d5ebfa1 100644 --- a/delphin/mrs/mrx.py +++ b/delphin/mrs/mrx.py @@ -217,9 +217,9 @@ def _decode_pred(elem): # pos (v|n|j|r|p|q|c|x|u|a|s) #REQUIRED # sense CDATA #IMPLIED > if elem.tag == 'pred': - return Pred.grammarpred(elem.text) + return Pred.abstract(elem.text) elif elem.tag == 'spred': - return Pred.stringpred(elem.text) + return Pred.surface(elem.text) elif elem.tag == 'realpred': return Pred.realpred(elem.get('lemma'), elem.get('pos') or None, @@ -364,10 +364,10 @@ def _encode_ep(ep, varprops=None): def _encode_pred(pred): p = None - if pred.type == Pred.GRAMMARPRED: + if pred.type == Pred.ABSTRACT: p = etree.Element('pred') p.text = pred.string - elif pred.type == Pred.STRINGPRED: + elif pred.type == Pred.SURFACE: p = etree.Element('spred') p.text = pred.string elif pred.type == Pred.REALPRED: diff --git 
a/delphin/mrs/simplemrs.py b/delphin/mrs/simplemrs.py index 40f936a..ab194b7 100644 --- a/delphin/mrs/simplemrs.py +++ b/delphin/mrs/simplemrs.py @@ -280,7 +280,7 @@ def _read_ep(tokens, nid, vars_): _var_re = var_re # begin parsing _read_literals(tokens, '[') - pred = Pred.string_or_grammar_pred(tokens.popleft()) + pred = Pred.surface_or_abstract(tokens.popleft()) lnk = _read_lnk(tokens) surface = label = None if tokens[0].startswith('"'): diff --git a/delphin/mrs/xmrs.py b/delphin/mrs/xmrs.py index 7dd411e..9b6d7a2 100644 --- a/delphin/mrs/xmrs.py +++ b/delphin/mrs/xmrs.py @@ -541,7 +541,7 @@ class Xmrs(_LnkMixin): rank[n] = 0 elif pred.is_quantifier(): rank[n] = 0 - elif pred.type == Pred.GRAMMARPRED: + elif pred.type == Pred.ABSTRACT: rank[n] = 2 else: rank[n] = 1 @@ -746,7 +746,7 @@ class Mrs(Xmrs): >>> top='h0', >>> index='e2', >>> rels=[ElementaryPredication( - >>> Pred.stringpred('_rain_v_1_rel'), + >>> Pred.surface('_rain_v_1_rel'), >>> label='h1', >>> args={'ARG0': 'e2'}, >>> vars={'e2': {'SF': 'prop-or-ques', 'TENSE': 'present'}} @@ -822,7 +822,7 @@ class Mrs(Xmrs): def _ep(ep): return ElementaryPredication( nodeid=None, - pred=Pred.string_or_grammar_pred(ep['predicate']), + pred=Pred.surface_or_abstract(ep['predicate']), label=ep['label'], args=ep.get('arguments', {}), lnk=_lnk(ep.get('lnk')), @@ -882,7 +882,7 @@ def Rmrs(top=None, index=None, xarg=None, >>> index='e2', >>> eps=[ElementaryPredication( >>> 10000, - >>> Pred.stringpred('_rain_v_1_rel'), + >>> Pred.surface('_rain_v_1_rel'), >>> 'h1' >>> )], >>> args={10000: {'ARG0': 'e2'}}, @@ -934,7 +934,7 @@ class Dmrs(Xmrs): Example: - >>> rain = Node(10000, Pred.stringpred('_rain_v_1_rel'), + >>> rain = Node(10000, Pred.surface('_rain_v_1_rel'), >>> sortinfo={'cvarsort': 'e'}) >>> ltop_link = Link(0, 10000, post='H') >>> d = Dmrs([rain], [ltop_link]) @@ -1056,7 +1056,7 @@ class Dmrs(Xmrs): def _node(obj): return Node( obj.get('nodeid'), - Pred.string_or_grammar_pred(obj.get('predicate')), + 
Pred.surface_or_abstract(obj.get('predicate')), sortinfo=obj.get('sortinfo'), lnk=_lnk(obj.get('lnk')), surface=obj.get('surface'), @@ -1120,7 +1120,7 @@ class Dmrs(Xmrs): nids.append(src) nd[src] = {'pred': None, 'lnk': None, 'carg': None, 'si': []} if rel == 'predicate': - nd[src]['pred'] = Pred.string_or_grammar_pred(tgt) + nd[src]['pred'] = Pred.surface_or_abstract(tgt) elif rel == 'lnk': cfrom, cto = tgt.strip('"<>').split(':') nd[src]['lnk'] = Lnk.charspan(int(cfrom), int(cto))
Predicate terminology is outdated PyDelphin still refers to "string" and "grammar" predicates, where these should be "surface" and "abstract" ("string" is compared to "type", not "grammar"/"abstract").
delph-in/pydelphin
diff --git a/tests/mrs_Dmrs_test.py b/tests/mrs_Dmrs_test.py index 9d2980b..de621ab 100644 --- a/tests/mrs_Dmrs_test.py +++ b/tests/mrs_Dmrs_test.py @@ -8,7 +8,7 @@ from delphin.mrs.config import UNKNOWNSORT #from delphin.mrs import simplemrs # for convenience in later tests from delphin.exceptions import XmrsError -sp = Pred.stringpred +sp = Pred.surface # for convenience diff --git a/tests/mrs_Mrs_test.py b/tests/mrs_Mrs_test.py index 00fe320..38d1588 100644 --- a/tests/mrs_Mrs_test.py +++ b/tests/mrs_Mrs_test.py @@ -16,7 +16,7 @@ from delphin.mrs import Mrs #from delphin.mrs import simplemrs # for convenience in later tests from delphin.exceptions import XmrsError -sp = Pred.stringpred +sp = Pred.surface # for convenience diff --git a/tests/mrs_components_test.py b/tests/mrs_components_test.py index fbfecd1..7238afe 100644 --- a/tests/mrs_components_test.py +++ b/tests/mrs_components_test.py @@ -10,7 +10,7 @@ from delphin.mrs.components import ( Pred, split_pred_string, is_valid_pred_string, normalize_pred_string, Node, ElementaryPredication as EP ) -spred = Pred.stringpred +spred = Pred.surface from delphin.mrs.xmrs import Xmrs from delphin.mrs.config import ( CVARSORT, IVARG_ROLE, CONSTARG_ROLE, RSTR_ROLE, @@ -338,27 +338,27 @@ def test_hcons(): class TestPred(): def testGpred(self): - p = Pred.grammarpred('pron_rel') - assert p.type == Pred.GRAMMARPRED + p = Pred.abstract('pron_rel') + assert p.type == Pred.ABSTRACT assert p.string == 'pron_rel' assert p.lemma == 'pron' assert p.pos == None assert p.sense == None assert p.short_form() == 'pron' - p = Pred.grammarpred('udef_q_rel') + p = Pred.abstract('udef_q_rel') assert p.string == 'udef_q_rel' assert p.lemma == 'udef' assert p.pos == 'q' assert p.sense == None assert p.short_form() == 'udef_q' - p = Pred.grammarpred('udef_q') + p = Pred.abstract('udef_q') assert p.string == 'udef_q' assert p.lemma == 'udef' assert p.pos == 'q' assert p.sense == None assert p.short_form() == 'udef_q' - p = 
Pred.grammarpred('abc_def_ghi_rel') - assert p.type == Pred.GRAMMARPRED + p = Pred.abstract('abc_def_ghi_rel') + assert p.type == Pred.ABSTRACT assert p.string == 'abc_def_ghi_rel' # pos must be a single character, so we get abc_def, ghi, rel assert p.lemma == 'abc_def' @@ -369,28 +369,28 @@ class TestPred(): def testSpred(self): p = spred('_dog_n_rel') - assert p.type == Pred.STRINGPRED + assert p.type == Pred.SURFACE assert p.string == '_dog_n_rel' assert p.lemma == 'dog' assert p.pos == 'n' assert p.sense == None assert p.short_form() == '_dog_n' p = spred('_犬_n_rel') - assert p.type == Pred.STRINGPRED + assert p.type == Pred.SURFACE assert p.string == '_犬_n_rel' assert p.lemma == '犬' assert p.pos == 'n' assert p.sense == None assert p.short_form() == '_犬_n' p = spred('"_dog_n_1_rel"') - assert p.type == Pred.STRINGPRED + assert p.type == Pred.SURFACE assert p.string == '"_dog_n_1_rel"' assert p.lemma == 'dog' assert p.pos == 'n' assert p.sense == '1' assert p.short_form() == '_dog_n_1' p = spred('"_dog_n_1"') - assert p.type == Pred.STRINGPRED + assert p.type == Pred.SURFACE assert p.string == '"_dog_n_1"' assert p.lemma == 'dog' assert p.pos == 'n' @@ -398,7 +398,7 @@ class TestPred(): assert p.short_form() == '_dog_n_1' # see https://github.com/delph-in/pydelphin/issues/129 p = spred('_te_adjunct_rel') - assert p.type == Pred.STRINGPRED + assert p.type == Pred.SURFACE assert p.string == '_te_adjunct_rel' assert p.lemma == 'te' assert p.pos == None @@ -410,11 +410,11 @@ class TestPred(): #with pytest.raises(ValueError): spred('_dog_n_1_2_rel') repr(p) # no error - def testStringOrGrammarPred(self): - p = Pred.string_or_grammar_pred('_dog_n_rel') - assert p.type == Pred.STRINGPRED - p = Pred.string_or_grammar_pred('pron_rel') - assert p.type == Pred.GRAMMARPRED + def testSurfaceOrAbstractPred(self): + p = Pred.surface_or_abstract('_dog_n_rel') + assert p.type == Pred.SURFACE + p = Pred.surface_or_abstract('pron_rel') + assert p.type == Pred.ABSTRACT def 
testRealPred(self): # basic, no sense arg @@ -459,8 +459,8 @@ class TestPred(): assert spred('_dog_n_rel') == '_dog_n_rel' assert '_dog_n_rel' == Pred.realpred(lemma='dog', pos='n') assert spred('"_dog_n_rel"') == spred("'_dog_n_rel") - assert Pred.grammarpred('pron_rel') == 'pron_rel' - assert Pred.string_or_grammar_pred('_dog_n_rel') != Pred.string_or_grammar_pred('dog_n_rel') + assert Pred.abstract('pron_rel') == 'pron_rel' + assert Pred.surface_or_abstract('_dog_n_rel') != Pred.surface_or_abstract('dog_n_rel') assert (spred('_dog_n_rel') == None) == False assert spred('_dog_n_1_rel') == spred('_Dog_N_1_rel') assert spred('_dog_n_1_rel') == spred('_dog_n_1') diff --git a/tests/mrs_eds_test.py b/tests/mrs_eds_test.py index ca73ea3..eb2ce90 100644 --- a/tests/mrs_eds_test.py +++ b/tests/mrs_eds_test.py @@ -70,7 +70,7 @@ def eds_it_rains(): nodes=[ Node( 'e2', - Pred.stringpred('"_rain_v_1_rel"'), + Pred.surface('"_rain_v_1_rel"'), sortinfo={ 'SF': 'prop', 'TENSE': 'pres', 'MOOD': 'indicative', 'PROG': '-', 'PERF': '-', CVARSORT: 'e'}, @@ -85,11 +85,11 @@ def eds_dogs_chase_Kim(): return eds.Eds( top='e2', nodes=[ - Node('_1', Pred.stringpred('udef_q_rel')), - Node('x4', Pred.stringpred('"_dog_n_1_rel"')), - Node('e2', Pred.stringpred('"_chase_v_1_rel"')), - Node('_2', Pred.stringpred('proper_q_rel')), - Node('x6', Pred.stringpred('named_rel'), carg='Kim') + Node('_1', Pred.surface('udef_q_rel')), + Node('x4', Pred.surface('"_dog_n_1_rel"')), + Node('e2', Pred.surface('"_chase_v_1_rel"')), + Node('_2', Pred.surface('proper_q_rel')), + Node('x6', Pred.surface('named_rel'), carg='Kim') ], edges=[ ('_1', 'BV', 'x4'), @@ -104,10 +104,10 @@ def eds_kim_probably_sleeps(): return eds.Eds( top='e9', nodes=[ - Node('_1', Pred.stringpred('proper_q_rel')), - Node('x3', Pred.stringpred('named_rel'), carg='Kim'), - Node('e9', Pred.stringpred('_probable_a_1_rel')), - Node('e2', Pred.stringpred('_sleep_v_1_rel')), + Node('_1', Pred.surface('proper_q_rel')), + Node('x3', 
Pred.surface('named_rel'), carg='Kim'), + Node('e9', Pred.surface('_probable_a_1_rel')), + Node('e2', Pred.surface('_sleep_v_1_rel')), ], edges=[ ('_1', 'BV', 'x3'), diff --git a/tests/mrs_isomorphism_test.py b/tests/mrs_isomorphism_test.py new file mode 100644 index 0000000..7141200 --- /dev/null +++ b/tests/mrs_isomorphism_test.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Jul 26 17:33:39 2018 + +@author: aymm +""" + +from delphin.mrs import simplemrs, compare + +# s1 and s2 differ only by the presence of '_rel' at the end of the predicates + +s1 = '[ LTOP: h0 INDEX: e2 [ e SF: prop TENSE: past MOOD: indicative PROG: - PERF: - ] RELS: < [ proper_q<0:6> LBL: h4 ARG0: x3 [ x PERS: 3 NUM: sg IND: + ] RSTR: h5 BODY: h6 ] [ named<0:6> LBL: h7 CARG: "Abrams" ARG0: x3 ] [ _sleep_v_1<7:13> LBL: h1 ARG0: e2 ARG1: x3 ] > HCONS: < h0 qeq h1 h5 qeq h7 > ]' + +s2 = '[ LTOP: h0 INDEX: e2 [ e SF: prop TENSE: past MOOD: indicative PROG: - PERF: - ] RELS: < [ proper_q_rel<0:6> LBL: h4 ARG0: x3 [ x PERS: 3 NUM: sg IND: + ] RSTR: h5 BODY: h6 ] [ named_rel<0:6> LBL: h7 CARG: "Abrams" ARG0: x3 ] [ _sleep_v_1_rel<7:13> LBL: h1 ARG0: e2 ARG1: x3 ] > HCONS: < h0 qeq h1 h5 qeq h7 > ]' + +x1 = simplemrs.loads_one(s1) +x2 = simplemrs.loads_one(s2) + +def test_isomorphic(): + assert x1 == x2 # generally a stricter test than isomorphism + assert compare.isomorphic(x1, x2) + assert compare.isomorphic(x1, x1) + assert compare.isomorphic(x2, x2) diff --git a/tests/mrs_path_test.py b/tests/mrs_path_test.py index a17b3d5..478f8e8 100644 --- a/tests/mrs_path_test.py +++ b/tests/mrs_path_test.py @@ -2,7 +2,7 @@ from delphin.mrs import xmrs, path as mp from delphin.mrs.components import Pred, ElementaryPredication as EP -sp = Pred.stringpred +sp = Pred.surface def qeq(hi, lo): return (hi, 'qeq', lo) diff --git a/tests/mrs_query_test.py b/tests/mrs_query_test.py index b989959..4ce6e7a 100644 --- a/tests/mrs_query_test.py +++ b/tests/mrs_query_test.py @@ -3,7 +3,7 @@ from 
delphin.mrs.xmrs import Xmrs from delphin.mrs.components import ElementaryPredication as EP, Pred from delphin.mrs import query -sp = Pred.stringpred +sp = Pred.surface qeq = lambda hi, lo: (hi, 'qeq', lo) # "Cats are chased by big dogs." (reordered, but equivalent) diff --git a/tests/mrs_xmrs_test.py b/tests/mrs_xmrs_test.py index 4dc1635..41808c8 100644 --- a/tests/mrs_xmrs_test.py +++ b/tests/mrs_xmrs_test.py @@ -33,10 +33,10 @@ class TestXmrs(): assert len(x.eps()) == 0 # nodeid and pred with pytest.raises(XmrsError): - x.add_eps([(10000, Pred.stringpred('_v_v_rel'))]) + x.add_eps([(10000, Pred.surface('_v_v_rel'))]) assert len(x.eps()) == 0 # nodeid, pred, and label (the minimum) - x.add_eps([(10000, Pred.stringpred('_v_v_rel'), 'h1')]) + x.add_eps([(10000, Pred.surface('_v_v_rel'), 'h1')]) # make sure it was entered correctly and is unchanged assert len(x.eps()) == 1 assert x.eps()[0][0] == 10000 @@ -46,7 +46,7 @@ class TestXmrs(): # nodeid, pred, label, and argdict x = Xmrs() - x.add_eps([(10000, Pred.stringpred('_v_v_rel'), 'h1', {})]) + x.add_eps([(10000, Pred.surface('_v_v_rel'), 'h1', {})]) assert len(x.eps()) == 1 assert x.eps()[0][0] == 10000 ep = x.ep(10000) @@ -57,7 +57,7 @@ class TestXmrs(): # cannot have more than one ep with the same nodeid with pytest.raises(XmrsError): - x.add_eps([(10000, Pred.stringpred('_n_n_rel'), 'h3', {})]) + x.add_eps([(10000, Pred.surface('_n_n_rel'), 'h3', {})]) assert len(x.eps()) == 1 def test_add_hcons(self): @@ -112,7 +112,7 @@ class TestXmrs(): assert x.xarg == 'e0' def test_nodeid(self): - sp = Pred.stringpred + sp = Pred.surface x = Xmrs() with pytest.raises(KeyError): x.nodeid('e2') @@ -128,7 +128,7 @@ class TestXmrs(): assert x.nodeid('x4', quantifier=True) == 11 def test_nodeids(self): - sp = Pred.stringpred + sp = Pred.surface x = Xmrs(eps=[(10, sp('_n_n_rel'), 'h3', {'ARG0': 'x4'})]) assert x.nodeids() == [10] assert x.nodeids(ivs=['x4']) == [10] @@ -143,7 +143,7 @@ class TestXmrs(): assert 
sorted(x.nodeids(ivs=['x4'], quantifier=False)) == [10] def test_ep(self): - sp = Pred.stringpred + sp = Pred.surface x = Xmrs() with pytest.raises(TypeError): x.ep() @@ -153,7 +153,7 @@ class TestXmrs(): assert x.ep(10)[1] == sp('_n_n_rel') def test_eps(self): - sp = Pred.stringpred + sp = Pred.surface x = Xmrs() assert len(x.eps()) == 0 x.add_eps([(10, sp('_n_n_rel'), 'h3', {'ARG0': 'x4'})]) @@ -236,7 +236,7 @@ class TestXmrs(): assert len(x.icons(left='x7')) == 1 def test_variables_and_properties(self): - sp = Pred.stringpred + sp = Pred.surface # variables can be passed in with properties x = Xmrs(vars={'x1':{'PERS':'3','NUM':'sg'}, 'e2':{'SF':'prop'}}) assert len(x.variables()) == 2 @@ -287,11 +287,11 @@ class TestXmrs(): # KeyError on bad nodeid with pytest.raises(KeyError): x.pred(10) # but otherwise preds can be retrieved by nodeid - x.add_eps([(10, Pred.stringpred('_n_n_rel'), 'h3', {'ARG0': 'x4'})]) + x.add_eps([(10, Pred.surface('_n_n_rel'), 'h3', {'ARG0': 'x4'})]) assert x.pred(10).string == '_n_n_rel' def test_preds(self): - sp = Pred.stringpred + sp = Pred.surface x = Xmrs( eps=[ (10, sp('_v_v_rel'), 'h3', {'ARG0': 'e2', 'ARG1': 'x4'}), @@ -324,12 +324,12 @@ class TestXmrs(): def test_label(self): # retrieve the label for a single ep, or KeyError if no such nodeid - x = Xmrs(eps=[(10, Pred.stringpred('_v_v_rel'), 'h3', {'ARG0': 'e2'})]) + x = Xmrs(eps=[(10, Pred.surface('_v_v_rel'), 'h3', {'ARG0': 'e2'})]) assert x.label(10) == 'h3' with pytest.raises(KeyError): x.label(11) def test_labels(self): - sp = Pred.stringpred + sp = Pred.surface # same as Xmrs.labels() but with a list of nodeids x = Xmrs( eps=[ @@ -347,7 +347,7 @@ class TestXmrs(): def test_args(self): # return the argument dict of a nodeid, or KeyError for missing nodeid x = Xmrs( - eps=[(10, Pred.stringpred('_v_v_rel'), 'h3', + eps=[(10, Pred.surface('_v_v_rel'), 'h3', {'ARG0': 'e2', 'ARG1': 'x4'})] ) assert x.args(10) == {'ARG0': 'e2', 'ARG1': 'x4'} @@ -356,11 +356,11 @@ class TestXmrs(): 
x.args(10)['ARG1'] = 'x6' assert x.args(10)['ARG1'] == 'x4' # return empty arg dict for EP without specified args: - x = Xmrs(eps=[(10, Pred.stringpred('_v_v_rel'), 'h3')]) + x = Xmrs(eps=[(10, Pred.surface('_v_v_rel'), 'h3')]) assert x.args(10) == {} def test_outgoing_args(self): - sp = Pred.stringpred + sp = Pred.surface # Outgoing args are those that, from some start node, go to # another in some way. These ways include: # regular variable args @@ -404,7 +404,7 @@ class TestXmrs(): def test_incoming_args(self): # incoming_args() is like the reverse of outgoing_args(), but # now it's many-to-one instead of one-to-many - sp = Pred.stringpred + sp = Pred.surface x = Xmrs( eps=[ (10, sp('_v_v_rel'), 'h3', {'ARG0': 'e2'}), @@ -479,7 +479,7 @@ class TestXmrs(): assert 10001 not in x assert '10000' not in x assert '_v_v_rel' not in x - assert Pred.stringpred('_v_v_rel') not in x + assert Pred.surface('_v_v_rel') not in x def test_labelset(self): pass def test_labelset_heads(self): pass @@ -493,9 +493,9 @@ class TestXmrs(): with pytest.raises(XmrsError): Xmrs(hcons=[('h0', 'qeq', 'h1')]).is_connected() # just a pred is fine (even without ARG0) - x = Xmrs(eps=[(10, Pred.stringpred('_v_v_rel'), 'h1', {})]) + x = Xmrs(eps=[(10, Pred.surface('_v_v_rel'), 'h1', {})]) assert x.is_connected() == True - x = Xmrs(eps=[(10, Pred.stringpred('_v_v_rel'), 'h1', + x = Xmrs(eps=[(10, Pred.surface('_v_v_rel'), 'h1', {'ARG0':'e2'})]) assert x.is_connected() == True # disconnected top is fine @@ -553,7 +553,7 @@ class TestXmrs(): pass def test_subgraph(self): - nodes = [Node(1,Pred.stringpred('verb'))] + nodes = [Node(1,Pred.surface('verb'))] links = [Link(0,1,'','H')] graph = Dmrs(nodes,links) new_graph = graph.subgraph([1])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 8 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "penman>=0.6.1", "networkx", "requests", "Pygments", "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work execnet==2.1.1 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work networkx==3.2.1 packaging @ file:///croot/packaging_1734472117206/work Penman==1.3.1 pluggy @ file:///croot/pluggy_1733169602837/work -e git+https://github.com/delph-in/pydelphin.git@83108aee8c1db2b4293f118616301d092a33f4df#egg=PyDelphin Pygments==2.19.1 pytest @ file:///croot/pytest_1738938843180/work pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 requests==2.32.3 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0 urllib3==2.3.0
name: pydelphin channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - execnet==2.1.1 - idna==3.10 - networkx==3.2.1 - penman==1.3.1 - pygments==2.19.1 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - requests==2.32.3 - typing-extensions==4.13.0 - urllib3==2.3.0 prefix: /opt/conda/envs/pydelphin
[ "tests/mrs_Dmrs_test.py::TestDmrs::test_empty", "tests/mrs_Dmrs_test.py::TestDmrs::test_single_node", "tests/mrs_Dmrs_test.py::TestDmrs::test_to_dict", "tests/mrs_Dmrs_test.py::TestDmrs::test_from_dict", "tests/mrs_Dmrs_test.py::TestDmrs::test_to_triples", "tests/mrs_Dmrs_test.py::TestDmrs::test_from_triples", "tests/mrs_Mrs_test.py::TestMrs::test_empty", "tests/mrs_Mrs_test.py::TestMrs::test_single_ep", "tests/mrs_Mrs_test.py::TestMrs::test_to_dict", "tests/mrs_Mrs_test.py::TestMrs::test_from_dict", "tests/mrs_components_test.py::test_sort_vid_split", "tests/mrs_components_test.py::test_var_sort", "tests/mrs_components_test.py::test_var_id", "tests/mrs_components_test.py::TestVarGenerator::test_init", "tests/mrs_components_test.py::TestVarGenerator::test_new", "tests/mrs_components_test.py::TestLnk::test_raw_init", "tests/mrs_components_test.py::TestLnk::testCharSpanLnk", "tests/mrs_components_test.py::TestLnk::testChartSpanLnk", "tests/mrs_components_test.py::TestLnk::testTokensLnk", "tests/mrs_components_test.py::TestLnk::testEdgeLnk", "tests/mrs_components_test.py::TestLnkMixin::test_inherit", "tests/mrs_components_test.py::TestLink::test_construct", "tests/mrs_components_test.py::test_links", "tests/mrs_components_test.py::TestHandleConstraint::test_construct", "tests/mrs_components_test.py::TestHandleConstraint::test_qeq", "tests/mrs_components_test.py::TestHandleConstraint::test_equality", "tests/mrs_components_test.py::TestHandleConstraint::test_hashable", "tests/mrs_components_test.py::test_hcons", "tests/mrs_components_test.py::TestPred::testGpred", "tests/mrs_components_test.py::TestPred::testSpred", "tests/mrs_components_test.py::TestPred::testSurfaceOrAbstractPred", "tests/mrs_components_test.py::TestPred::testRealPred", "tests/mrs_components_test.py::TestPred::testEq", "tests/mrs_components_test.py::TestPred::test_is_quantifier", "tests/mrs_components_test.py::TestPred::test_hash", "tests/mrs_components_test.py::test_split_pred_string", 
"tests/mrs_components_test.py::test_is_valid_pred_string", "tests/mrs_components_test.py::test_normalize_pred_string", "tests/mrs_components_test.py::TestNode::test_construct", "tests/mrs_components_test.py::TestNode::test_sortinfo", "tests/mrs_components_test.py::TestNode::test_properties", "tests/mrs_components_test.py::TestNode::test_lnk", "tests/mrs_components_test.py::TestNode::test_cvarsort", "tests/mrs_components_test.py::TestElementaryPredication::test_construct", "tests/mrs_components_test.py::TestElementaryPredication::test_args", "tests/mrs_components_test.py::TestElementaryPredication::test_is_quantifier", "tests/mrs_eds_test.py::TestEds::test_init", "tests/mrs_eds_test.py::TestEds::test_to_dict", "tests/mrs_eds_test.py::test_deserialize", "tests/mrs_eds_test.py::test_serialize", "tests/mrs_eds_test.py::test_serialize_list", "tests/mrs_path_test.py::test_headed_walk", "tests/mrs_path_test.py::test_topdown_walk", "tests/mrs_path_test.py::test_bottomup_walk", "tests/mrs_query_test.py::test_select_nodeids", "tests/mrs_query_test.py::test_select_nodes", "tests/mrs_query_test.py::test_select_eps", "tests/mrs_query_test.py::test_select_args", "tests/mrs_query_test.py::test_select_links", "tests/mrs_query_test.py::test_select_hcons", "tests/mrs_query_test.py::test_select_icons", "tests/mrs_query_test.py::test_find_argument_target", "tests/mrs_query_test.py::test_find_subgraphs_by_pred", "tests/mrs_query_test.py::test_intrinsic_variable", "tests/mrs_query_test.py::test_intrinsic_variables", "tests/mrs_query_test.py::test_bound_variables", "tests/mrs_query_test.py::test_in_labelset", "tests/mrs_query_test.py::test_find_quantifier", "tests/mrs_query_test.py::test_get_outbound_args", "tests/mrs_query_test.py::test_nodeid", "tests/mrs_xmrs_test.py::TestXmrs::test_empty", "tests/mrs_xmrs_test.py::TestXmrs::test_add_eps", "tests/mrs_xmrs_test.py::TestXmrs::test_add_hcons", "tests/mrs_xmrs_test.py::TestXmrs::test_add_icons", 
"tests/mrs_xmrs_test.py::TestXmrs::test_top", "tests/mrs_xmrs_test.py::TestXmrs::test_index", "tests/mrs_xmrs_test.py::TestXmrs::test_xarg", "tests/mrs_xmrs_test.py::TestXmrs::test_nodeid", "tests/mrs_xmrs_test.py::TestXmrs::test_nodeids", "tests/mrs_xmrs_test.py::TestXmrs::test_ep", "tests/mrs_xmrs_test.py::TestXmrs::test_eps", "tests/mrs_xmrs_test.py::TestXmrs::test_hcon", "tests/mrs_xmrs_test.py::TestXmrs::test_hcons", "tests/mrs_xmrs_test.py::TestXmrs::test_icons", "tests/mrs_xmrs_test.py::TestXmrs::test_variables_and_properties", "tests/mrs_xmrs_test.py::TestXmrs::test_pred", "tests/mrs_xmrs_test.py::TestXmrs::test_preds", "tests/mrs_xmrs_test.py::TestXmrs::test_label", "tests/mrs_xmrs_test.py::TestXmrs::test_labels", "tests/mrs_xmrs_test.py::TestXmrs::test_args", "tests/mrs_xmrs_test.py::TestXmrs::test_outgoing_args", "tests/mrs_xmrs_test.py::TestXmrs::test_incoming_args", "tests/mrs_xmrs_test.py::TestXmrs::test___eq__", "tests/mrs_xmrs_test.py::TestXmrs::test___contains__", "tests/mrs_xmrs_test.py::TestXmrs::test_labelset", "tests/mrs_xmrs_test.py::TestXmrs::test_labelset_heads", "tests/mrs_xmrs_test.py::TestXmrs::test_is_connected", "tests/mrs_xmrs_test.py::TestXmrs::test_is_well_formed", "tests/mrs_xmrs_test.py::TestXmrs::test_subgraph" ]
[ "tests/mrs_isomorphism_test.py::test_isomorphic" ]
[]
[]
MIT License
2,861
[ "delphin/mrs/mrx.py", "delphin/mrs/components.py", "delphin/mrs/simplemrs.py", "CHANGELOG.md", "delphin/mrs/compare.py", "delphin/mrs/dmrx.py", "delphin/mrs/eds.py", "delphin/mrs/xmrs.py" ]
[ "delphin/mrs/mrx.py", "delphin/mrs/components.py", "delphin/mrs/simplemrs.py", "CHANGELOG.md", "delphin/mrs/compare.py", "delphin/mrs/dmrx.py", "delphin/mrs/eds.py", "delphin/mrs/xmrs.py" ]
sigmavirus24__github3.py-879
87af5a1d26597d7cf1a843199a1b5a2449cd8069
2018-08-02 23:08:43
b8e7aa8eb221cd1eec7a8bc002b75de8098dc77a
sigmavirus24: You're right. This method, at this point in time is not going to be correct or conducive to the right behaviour for the future of the library. Here's what I think we should do: 1. Deprecate `Issue.assign`. It only accepts 1 user and that is outdated and likely going to be confusing going forward. 1. Add `add_assignees` (or something along those lines) that does the right thing (accepts a list of usernames/User objects) and calls `edit` appropriately. 1. Add `remove_assignees` which is the inverse of `add_assignees`. Does that make sense? jacquerie: > Does that make sense? Absolutely! I just pushed three commits that implement the three steps that you described.
diff --git a/src/github3/issues/issue.py b/src/github3/issues/issue.py index 15b7632a..cdf673fc 100644 --- a/src/github3/issues/issue.py +++ b/src/github3/issues/issue.py @@ -2,6 +2,7 @@ """Module containing the Issue logic.""" from __future__ import unicode_literals +import warnings from json import dumps from uritemplate import URITemplate @@ -67,6 +68,27 @@ class _Issue(models.GitHubCore): n=self.number, class_name=self.class_name, ) + @requires_auth + def add_assignees(self, users): + """Assign ``users`` to this issue. + + This is a shortcut for :meth:`~github3.issues.issue.Issue.edit`. + + :param users: + users or usernames to assign this issue to + :type users: + list of :class:`~github3.users.User` + :type users: + list of str + :returns: + True if successful, False otherwise + :rtype: + bool + """ + usernames = {getattr(user, 'login', user) for user in users} + assignees = list({a.login for a in self.assignees} | usernames) + return self.edit(assignees=assignees) + @requires_auth def add_labels(self, *args): """Add labels to this issue. @@ -86,6 +108,10 @@ class _Issue(models.GitHubCore): def assign(self, username): """Assign user ``username`` to this issue. + .. deprecated:: 1.2.0 + + Use :meth:`github3.issues.issue.Issue.add_assignees` instead. + This is a short cut for :meth:`~github3.issues.issue.Issue.edit`. :param str username: @@ -95,6 +121,9 @@ class _Issue(models.GitHubCore): :rtype: bool """ + warnings.warn( + 'This method is deprecated. Please use ``add_assignees`` ' + 'instead.', DeprecationWarning, stacklevel=2) if not username: return False number = self.milestone.number if self.milestone else None @@ -300,6 +329,27 @@ class _Issue(models.GitHubCore): json = self._json(self._get(pull_request_url), 200) return self._instance_or_null(pulls.PullRequest, json) + @requires_auth + def remove_assignees(self, users): + """Unassign ``users`` from this issue. + + This is a shortcut for :meth:`~github3.issues.issue.Issue.edit`. 
+ + :param users: + users or usernames to unassign this issue from + :type users: + list of :class:`~github3.users.User` + :type users: + list of str + :returns: + True if successful, False otherwise + :rtype: + bool + """ + usernames = {getattr(user, 'login', user) for user in users} + assignees = list({a.login for a in self.assignees} - usernames) + return self.edit(assignees=assignees) + @requires_auth def remove_label(self, name): """Remove label ``name`` from this issue.
No way to unassign an issue There seems to be an assign function, but not an unassign method for Issue <bountysource-plugin> --- Want to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/46179344-no-way-to-unassign-an-issue?utm_campaign=plugin&utm_content=tracker%2F183477&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F183477&utm_medium=issues&utm_source=github). </bountysource-plugin>
sigmavirus24/github3.py
diff --git a/tests/cassettes/Issue_add_assignees.json b/tests/cassettes/Issue_add_assignees.json new file mode 100644 index 00000000..418ba17b --- /dev/null +++ b/tests/cassettes/Issue_add_assignees.json @@ -0,0 +1,1 @@ +{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"], "Authorization": ["token <AUTH_TOKEN>"]}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/issues/711"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA8VW/W/iNhj+V7xIm7aqISSkcGSBatPupP1Au9Poelo5IScx4GtiR/6Apaj/+147gQNEez2u0yQiiP2+j5/36zFrR4vciZyFUqWMPA+XtDWnaqGTVsoLT5CSS0/SeYGXVGgZhF6922mVlUel1ER6Pd93zh1rShUX1fR0SMDJcUJy+Q0YO7S8GmztMVyQRwCHmArC1GvBb+AAmSxfEbcGA9SFKvKDVOxU50V1oZkTBZ2Li34Y9v1zh/GMTM2aM/rtpvvnh6uH7PamGj3c+Nfj9+3r8dsBHMt0kRDhRFDZc0dRlROwv+JohSukONIMSzibIcyQ7QFw0dI4rJ2czykD64IQVWA21/e0wrBvjux1g/ZFp7NP4n33rw9XefopDUaffvFHDyNDAC+xwuKwCeyibDcNak5MOVOQd9ur2mvwL5eDEDDmokGx0RqKz3W6QZPeIevn839oPeN5zleAc0h8f6iOHOVtXYFn/Zuy+akw4Lr2uFoQyCCEZRp/TuWXuv4YLeu2hpmXCprGAEkogSDZ11NrHIHYigGntdUWi6gTmQpaKsrZCZnbcwc4LuaY0Qd8Ihy4S0Cxavb1QVo3cH+JGhzLd+239kpBlzitTHoESQldQsZPxTwAAEhVlWagb8zIQv6pIlOcFWZsZziX5HEjwk5099FWXBlzXhIG5jlP7wlMlDWFSbVKQMCA6Tz//C5r54LmRCrOtvtbyYwCkGNBADubYgX4Qdvvue2u63fG/kUU9KLQ/xvO02W2Z/PGbcMnGAedqN2P/L6xSXMuG5iGhVYLLqZAjqfUtoIRsOurt2Cc8KyamsGGpbgcjmFMCJKEFNJoW0KMqjX6NtMsNc7nKNEKMa7M3lb9CgKHZGjGBfrdqGDslcMJm7B4IeAboG8xUxYTp/dILais1fISxVIJzubDGKOFILPBZHv/rlarVsI1U5XkWqTEKltzz4Zdv9fvhKHLuAtK7Crubri4mLnW6lKrYpriosRQlUGZa1DjH+D1Z7teq+VACSBExPfBO/9NJ+z1tgYFyaguBvV529WayaC+eCYOEiQHxozXOjVxhn9wCZlBNW/E4VJQ38UeHsZeEye6haymKSlVbUSJREuK0Qvj/1+C+nWnCiaYVpzYskKRN02kyD+mcV+vg0z3TNiTbXMsz0+ldpd+qyH8ulwFcIUn3m1Xt+64odkwj+u65uvJiM7O7o4F9fHHzf/R/2Qenp0Fs7k/B2ZlbwZ+Ojt7Ku13u3n/QhhH2/qbubVsUbxjVfms
lElV6/Xjv5qtd9IADAAA", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Last-Modified": ["Thu, 02 Aug 2018 23:09:19 GMT"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["A2E6:3FAE:2C29574:52316EC:5B6404DA"], "ETag": ["W/\"5b4dd8dd9fd32bdbfc73353b8813f599\""], "Date": ["Fri, 03 Aug 2018 07:31:38 GMT"], "X-RateLimit-Remaining": ["4999"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-OAuth-Scopes": ["public_repo, read:user"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.104248"], "Vary": ["Accept, Authorization, Cookie, X-GitHub-OTP"], "X-RateLimit-Limit": ["5000"], "Cache-Control": ["private, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Accepted-OAuth-Scopes": ["repo"], "X-RateLimit-Reset": ["1533285098"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/issues/711"}, "recorded_at": "2018-08-03T07:31:38"}, {"request": {"body": {"string": "{\"assignees\": [\"jacquerie\"]}", "encoding": "utf-8"}, "headers": {"Content-Length": ["28"], "Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"], "Authorization": ["token <AUTH_TOKEN>"]}, "method": "PATCH", "uri": 
"https://api.github.com/repos/sigmavirus24/github3.py/issues/711"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+1YbW/iRhD+K66lVm0UY4wdSCgQXdWrdB8gdyppTg0ntLYX2MTe9e0L1EH57zdr82IsSMJL1Q89CQSsZ559dnae2R3mpuKR2TQnUiaiadsoIZUxkRPlVwIW2xwnTNiCjGM0JVyJmmfnT91KktpECIWF3XAc89zMTIlkPB0eDgk4EfJxJI7AKNCyc7C5TVGMnwEc1hRjKk8Fv4QDZDw9IW4OBqgTGUelUBR25037QkKzWXMvLq4878o5NykL8VCPmd3fb+t/fu49hXe3affp1rnpf6re9N+3YVqqYh9zswk7e25KIiMM9j1mzFBqSGYoigTMTQ1EjSwHwEUJ7TA3IzYmFKxjjGWM6Fg9khTBcz1lo16rXrjuJolP9b8+96LgIah1H9453aeuJoCmSCJeToJsUFQXCapnDBiVEPcsV5W9wL+etj3AGPMFSrZaTfGlTNdowi6zfjn+ZesRiyI2A5wy8U1RbZnKXrkCz/w7oeNDYcB1bjM5wRBBWJZO/DERr2X9NlqZ2xw0LyQkjQYSsAUch/tTWzgCsRkFTvOstmSIyhcBJ4kkjB4QuQ13gGN8jCh5QgfCgbsAlKya7b/IzA3c31INtsU795vbCSdTFKQ6PBwHmEwh4odilgAAUqaJFvStlizEn0g8RGGsZTtCkcDPyyJsNu+/ZDsutTlLMAXziAWPGBSVmYJSs0qAwWCt/QcUfFWYE10XtPjcS6d2Wd0u+67X7X/wuu/ar8m+tlP2OfwRqi/yfVnxRcu91L5yPFzpZYhjVL7GOkrha5jTqbuAWSwMhyh7DbWvqtee+yt67XsaNRe5bJSCNyl5qU8BYv6u0K2X3LwQl+UFxzgEeK+zuAzxXaG7O4ty0P+vCoUDNiYRFpJROEOpiqJ1lwIXd/jBMZy/4RBJCGat6jSsat1y3L5z0aw1mp7zN2SpSsINm0urCi+3X200XafpXmmbIGJiAZPPgpScMD6EAsECkl2XYILeTe89GPssTIf6KIShVtLpw1USGwLjWOj7v4/1zX/RA4wUDbTzueEraVAm9bNVhxBjmCQ0RowbH3Sn0LKTzoAOaGvC4ROg7xCVGSYKHg05ISLvKK6NlpCc0XGnhYwJx6P2YNWjzmazis8Ulalgigc4u/0velGv7jSuXM+zKLOgW7Eks5ZcLEStzOpayXgYoDhBcHNpJ5GCjuUn+PlrNp53FG3JgRDmP9b+cC5dr9FYGcQ4JCpu5/OtRnMm7bw5G5gGxxEwpiyvHwOz85EJiIyR8zYYNE7yh5aNOi17sU7jDqIaBDiRuRHBwpgSZLxx/f/Jon4r7IJeTKXlZ9sKm7xMIon/0Yl7ugzS2TOgO9NmW5x3hbZIv7IgfFquHLjCu1VMVyvPuI5+oN+WZemPnSs6O7vftqgvPy//s/lX9PCiFvTDTR3okQ0N/HJ2tivs98W4v7KMrWl9NLdKtin2tl1ZV0o/zevx8zchJQr2JBMAAA==", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], 
"Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["A2E6:3FAE:2C295A6:5231732:5B6404DA"], "ETag": ["W/\"b4505c5c6261cbcfd05fc36ec2685139\""], "Date": ["Fri, 03 Aug 2018 07:31:39 GMT"], "X-RateLimit-Remaining": ["4998"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-OAuth-Scopes": ["public_repo, read:user"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.706423"], "Vary": ["Accept, Authorization, Cookie, X-GitHub-OTP"], "X-RateLimit-Limit": ["5000"], "Cache-Control": ["private, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Accepted-OAuth-Scopes": [""], "X-RateLimit-Reset": ["1533285098"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/issues/711"}, "recorded_at": "2018-08-03T07:31:39"}], "recorded_with": "betamax/0.8.1"} \ No newline at end of file diff --git a/tests/cassettes/Issue_remove_assignees.json b/tests/cassettes/Issue_remove_assignees.json new file mode 100644 index 00000000..2a9ff031 --- /dev/null +++ b/tests/cassettes/Issue_remove_assignees.json @@ -0,0 +1,1 @@ +{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"], "Authorization": ["token <AUTH_TOKEN>"]}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/issues/711"}, "response": {"body": {"string": "", "base64_string": 
"H4sIAAAAAAAAA+1YbW/iRhD+K66lVm0UY4wdSCgQXdWrdB8gdyppTg0ntLYX2MTe9e0L1EH57zdr82IsSMJL1Q89CQSsZ559dnae2R3mpuKR2TQnUiaiadsoIZUxkRPlVwIW2xwnTNiCjGM0JVyJmmfnT91KktpECIWF3XAc89zMTIlkPB0eDgk4EfJxJI7AKNCyc7C5TVGMnwEc1hRjKk8Fv4QDZDw9IW4OBqgTGUelUBR25037QkKzWXMvLq4878o5NykL8VCPmd3fb+t/fu49hXe3affp1rnpf6re9N+3YVqqYh9zswk7e25KIiMM9j1mzFBqSGYoigTMTQ1EjSwHwEUJ7TA3IzYmFKxjjGWM6Fg9khTBcz1lo16rXrjuJolP9b8+96LgIah1H9453aeuJoCmSCJeToJsUFQXCapnDBiVEPcsV5W9wL+etj3AGPMFSrZaTfGlTNdowi6zfjn+ZesRiyI2A5wy8U1RbZnKXrkCz/w7oeNDYcB1bjM5wRBBWJZO/DERr2X9NlqZ2xw0LyQkjQYSsAUch/tTWzgCsRkFTvOstmSIyhcBJ4kkjB4QuQ13gGN8jCh5QgfCgbsAlKya7b/IzA3c31INtsU795vbCSdTFKQ6PBwHmEwh4odilgAAUqaJFvStlizEn0g8RGGsZTtCkcDPyyJsNu+/ZDsutTlLMAXziAWPGBSVmYJSs0qAwWCt/QcUfFWYE10XtPjcS6d2Wd0u+67X7X/wuu/ar8m+tlP2OfwRqi/yfVnxRcu91L5yPFzpZYhjVL7GOkrha5jTqbuAWSwMhyh7DbWvqtee+yt67XsaNRe5bJSCNyl5qU8BYv6u0K2X3LwQl+UFxzgEeK+zuAzxXaG7O4ty0P+vCoUDNiYRFpJROEOpiqJ1lwIXd/jBMZy/4RBJCGat6jSsat1y3L5z0aw1mp7zN2SpSsINm0urCi+3X200XafpXmmbIGJiAZPPgpScMD6EAsECkl2XYILeTe89GPssTIf6KIShVtLpw1USGwLjWOj7v4/1zX/RA4wUDbTzueEraVAm9bNVhxBjmCQ0RowbH3Sn0LKTzoAOaGvC4ROg7xCVGSYKHg05ISLvKK6NlpCc0XGnhYwJx6P2YNWjzmazis8Ulalgigc4u/0velGv7jSuXM+zKLOgW7Eks5ZcLEStzOpayXgYoDhBcHNpJ5GCjuUn+PlrNp53FG3JgRDmP9b+cC5dr9FYGcQ4JCpu5/OtRnMm7bw5G5gGxxEwpiyvHwOz85EJiIyR8zYYNE7yh5aNOi17sU7jDqIaBDiRuRHBwpgSZLxx/f/Jon4r7IJeTKXlZ9sKm7xMIon/0Yl7ugzS2TOgO9NmW5x3hbZIv7IgfFquHLjCu1VMVyvPuI5+oN+WZemPnSs6O7vftqgvPy//s/lX9PCiFvTDTR3okQ0N/HJ2tivs98W4v7KMrWl9NLdKtin2tl1ZV0o/zevx8zchJQr2JBMAAA==", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Last-Modified": ["Fri, 03 Aug 2018 07:31:39 GMT"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], 
"X-GitHub-Request-Id": ["95AE:3FAE:2C780A7:52C3D04:5B640BE5"], "ETag": ["W/\"b4505c5c6261cbcfd05fc36ec2685139\""], "Date": ["Fri, 03 Aug 2018 08:01:41 GMT"], "X-RateLimit-Remaining": ["4997"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "Server": ["GitHub.com"], "X-OAuth-Scopes": ["public_repo, read:user"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.080789"], "Vary": ["Accept, Authorization, Cookie, X-GitHub-OTP"], "X-RateLimit-Limit": ["5000"], "Cache-Control": ["private, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Accepted-OAuth-Scopes": ["repo"], "X-RateLimit-Reset": ["1533285098"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/issues/711"}, "recorded_at": "2018-08-03T08:01:41"}, {"request": {"body": {"string": "{\"assignees\": []}", "encoding": "utf-8"}, "headers": {"Content-Length": ["17"], "Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.v3.full+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"], "Authorization": ["token <AUTH_TOKEN>"]}, "method": "PATCH", "uri": "https://api.github.com/repos/sigmavirus24/github3.py/issues/711"}, "response": {"body": {"string": "", "base64_string": 
"H4sIAAAAAAAAA8VW/W/iNhj+V7xIm7aqIQRSaHOBatNu0n6g3Wn0erpyQk5igq+JHfkDlqL+73vtBAaI9npcT5OIIPb7Pn7er8esHC1yJ3TmSpUy9Dxc0lZG1VzHrYQXniAll56kWYEXVGjZCbx6t9sqK49KqYn0+r7vnDrWlCouqunxkICT45jk8hswtmh5NdjKY7ggjwAOMRWEqdeCX8MBMlm8Im4NBqhzVeR7qdiqzovqQlMn7HTPzi6C4MI/dRhPydSsOaPfb3p/f7h6SG9vqtHDjX89fte+Hr8dwLFMFzERTgiVPXUUVTkB+yuOlrhCiiPNsISzGcIM2R4AFy2Nw8rJeUYZWBeEqAKzTN/TCsO+ObLf67TPut1dEu967z9c5cnnpDP6/Ks/ehgZAniBFRb7TWAXZbtpUHNiwpmCvNte1V6Df7kYBICRiQbFRmsoPtfpBk16+6yfz/++9YznOV8Czj7x3aE6cJS3cQWe9W/KsmNhwHXlcTUnkEEIyzR+RuWXuv4QLeu2gpmXCprGAEkogSDp11NrHIHYkgGnldUWi6hjmQhaKsrZEZnbcQc4LjLM6AM+Eg7cJaBYNfv6IK0buL9EDQ7lu/ZbeaWgC5xUJj2CJIQuIOPHYu4BAKSqSjPQN2ZkIf9UkSlOCzO2M5xL8rgWYSe8+2Qrrow5LwkD85wn9wQmyprCpFolIGDAdJ7/9y5r54LmRCrONvsbyQw7IMeCAHY6xQrwO22/77Z7rt8d+2dhpx8G/kc4T5fpjs2524ZPd9w+D9t+Y5PkXDYwDQut5lxMgRxPqG0FI2DXV28BMOZpNTWDDUtRORzDmBAkCSmk0baYGFVr9G2mWWKcT1GsFWJcmb2N+hUEDknRjAv0p1HByCuHEzZh0VzAN0DfYqYsJk7ukZpTWavlJYqkEpxlwwijuSCzwWRz/y6Xy1bMNVOV5FokxCpbc88GPb9/0Q0Cl3EXlNhV3F1zcTFzrdWlVsU0wUWJoSqDMtegxj/B6xu7XqvlQAkgRMSPnT/8827Q728MCpJSXQzq8zarNZNBffFMHCRIDowZr3Vq4gz/4hIyg2reiMOloH6IPDyMvCZOdAtZTRJSqtqIEokWFKMXxv+/BPXbVhVMMK0otmWFIq+bSJF/TOO+XgeZ7pmwJ9vmUJ6fSu02/VZD+HW5CuAKT7Tdrm7dcUOzYR7Xdc3XkxGdnNwdCurTz+v/o99lHp6dBbO5OwdmZWcGfjk5eSrtd9t5/0IYB9v6m7m1bFG8Q1WBNmiUMq5qvX78F2GbEbsADAAA", "encoding": "utf-8"}, "headers": {"X-XSS-Protection": ["1; mode=block"], "Content-Security-Policy": ["default-src 'none'"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "Access-Control-Allow-Origin": ["*"], "X-Frame-Options": ["deny"], "Status": ["200 OK"], "X-GitHub-Request-Id": ["95AE:3FAE:2C780CD:52C3D3D:5B640BE5"], "ETag": ["W/\"d2e139e7445377dc3ca16b5e4bf28495\""], "Date": ["Fri, 03 Aug 2018 08:01:41 GMT"], "X-RateLimit-Remaining": ["4996"], "Strict-Transport-Security": ["max-age=31536000; 
includeSubdomains; preload"], "Server": ["GitHub.com"], "X-OAuth-Scopes": ["public_repo, read:user"], "X-GitHub-Media-Type": ["github.v3; param=full; format=json"], "X-Content-Type-Options": ["nosniff"], "Content-Encoding": ["gzip"], "X-Runtime-rack": ["0.273701"], "Vary": ["Accept, Authorization, Cookie, X-GitHub-OTP"], "X-RateLimit-Limit": ["5000"], "Cache-Control": ["private, max-age=60, s-maxage=60"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Accepted-OAuth-Scopes": [""], "X-RateLimit-Reset": ["1533285098"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/issues/711"}, "recorded_at": "2018-08-03T08:01:41"}], "recorded_with": "betamax/0.8.1"} \ No newline at end of file diff --git a/tests/integration/test_issue.py b/tests/integration/test_issue.py index d9aadf2e..fc5426ad 100644 --- a/tests/integration/test_issue.py +++ b/tests/integration/test_issue.py @@ -11,6 +11,18 @@ class TestIssue(IntegrationHelper): """Integration tests for methods on the Issue class.""" + def test_add_assignees(self): + """Test the ability to add assignees to an issue.""" + self.auto_login() + cassette_name = self.cassette_name('add_assignees') + with self.recorder.use_cassette(cassette_name): + issue = self.gh.issue(username='sigmavirus24', + repository='github3.py', + number=711) + assigned = issue.add_assignees(['jacquerie']) + + assert assigned is True + def test_add_labels(self): """Test the ability to add a label to an issue.""" self.auto_login() @@ -203,6 +215,18 @@ class TestIssue(IntegrationHelper): assert reopened is True + def test_remove_assignees(self): + """Test the ability to remove assignees from an issue.""" + self.auto_login() + cassette_name = self.cassette_name('remove_assignees') + with self.recorder.use_cassette(cassette_name): + issue = self.gh.issue(username='sigmavirus24', + repository='github3.py', + number=711) 
+ unassigned = issue.remove_assignees(['jacquerie']) + + assert unassigned is True + def test_remove_label(self): """Test the ability to remove a label from an issue.""" self.auto_login() diff --git a/tests/unit/test_issues_issue.py b/tests/unit/test_issues_issue.py index f8f1f0e0..f67cca29 100644 --- a/tests/unit/test_issues_issue.py +++ b/tests/unit/test_issues_issue.py @@ -44,6 +44,10 @@ class TestIssueRequiresAuth(helper.UnitRequiresAuthenticationHelper): def after_setup(self): self.session.has_auth.return_value = False + def test_add_assignees(self): + """Verify that adding assignees requires authentication.""" + self.assert_requires_auth(self.instance.add_assignees) + def test_add_labels(self): """Verify that adding a label requires authentication.""" self.assert_requires_auth(self.instance.add_labels, 'enhancement') @@ -73,6 +77,10 @@ class TestIssueRequiresAuth(helper.UnitRequiresAuthenticationHelper): """Verify that removing all labels requires authentication.""" self.assert_requires_auth(self.instance.remove_all_labels) + def test_remove_assignees(self): + """Verify that removing assignees requires authentication.""" + self.assert_requires_auth(self.instance.remove_assignees) + def test_remove_label(self): """Verify that removing a label requires authentication.""" self.assert_requires_auth(self.instance.remove_label, 'enhancement') @@ -92,6 +100,15 @@ class TestIssue(helper.UnitHelper): described_class = github3.issues.Issue example_data = get_issue_example_data() + def test_add_assignees(self): + """Verify the request for adding assignees to an issue.""" + self.instance.add_assignees(['jacquerie']) + + self.session.patch.assert_called_with( + url_for(), + data='{"assignees": ["jacquerie"]}' + ) + def test_add_labels(self): """Verify the request for adding a label.""" self.instance.add_labels('enhancement') @@ -294,6 +311,15 @@ class TestIssue(helper.UnitHelper): assert self.instance.remove_all_labels() == [] replace_labels.assert_called_once_with([]) + 
def test_remove_assignees(self): + """Verify the request for removing assignees from an issue.""" + self.instance.remove_assignees(['octocat']) + + self.session.patch.assert_called_once_with( + url_for(), + data='{"assignees": []}' + ) + def test_remove_label(self): """Verify the request for removing a label from an issue.""" self.instance.remove_label('enhancement')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist", "betamax", "betamax_matchers" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 betamax==0.8.1 betamax-matchers==0.4.0 certifi==2021.5.30 charset-normalizer==2.0.12 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 -e git+https://github.com/sigmavirus24/github3.py.git@87af5a1d26597d7cf1a843199a1b5a2449cd8069#egg=github3.py idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 mock==1.0.1 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 requests==2.27.1 requests-toolbelt==1.0.0 six==1.17.0 swebench-matterhorn @ file:///swebench_matterhorn toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 uritemplate==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 zipp==3.6.0
name: github3.py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - betamax==0.8.1 - betamax-matchers==0.4.0 - charset-normalizer==2.0.12 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - mock==1.0.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - requests==2.27.1 - requests-toolbelt==1.0.0 - six==1.17.0 - swebench-matterhorn==0.0.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - uritemplate==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - wheel==0.21.0 - zipp==3.6.0 prefix: /opt/conda/envs/github3.py
[ "tests/integration/test_issue.py::TestIssue::test_add_assignees", "tests/integration/test_issue.py::TestIssue::test_remove_assignees", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_add_assignees", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_remove_assignees", "tests/unit/test_issues_issue.py::TestIssue::test_add_assignees", "tests/unit/test_issues_issue.py::TestIssue::test_remove_assignees" ]
[]
[ "tests/integration/test_issue.py::TestIssue::test_add_labels", "tests/integration/test_issue.py::TestIssue::test_assign", "tests/integration/test_issue.py::TestIssue::test_closed", "tests/integration/test_issue.py::TestIssue::test_comment", "tests/integration/test_issue.py::TestIssue::test_comments", "tests/integration/test_issue.py::TestIssue::test_create_comment", "tests/integration/test_issue.py::TestIssue::test_edit", "tests/integration/test_issue.py::TestIssue::test_edit_both_assignee_and_assignees", "tests/integration/test_issue.py::TestIssue::test_edit_multiple_assignees", "tests/integration/test_issue.py::TestIssue::test_events", "tests/integration/test_issue.py::TestIssue::test_labels", "tests/integration/test_issue.py::TestIssue::test_lock", "tests/integration/test_issue.py::TestIssue::test_pull_request", "tests/integration/test_issue.py::TestIssue::test_remove_all_labels", "tests/integration/test_issue.py::TestIssue::test_remove_label", "tests/integration/test_issue.py::TestIssue::test_reopen", "tests/integration/test_issue.py::TestIssue::test_replace_labels", "tests/integration/test_issue.py::TestIssue::test_unlock", "tests/integration/test_issue.py::TestLabel::test_delete", "tests/integration/test_issue.py::TestLabel::test_update", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_add_labels", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_assign", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_close", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_create_comment", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_edit_comment", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_lock", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_remove_all_labels", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_remove_label", "tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_reopen", 
"tests/unit/test_issues_issue.py::TestIssueRequiresAuth::test_unlock", "tests/unit/test_issues_issue.py::TestIssue::test_add_labels", "tests/unit/test_issues_issue.py::TestIssue::test_assign", "tests/unit/test_issues_issue.py::TestIssue::test_assign_empty_username", "tests/unit/test_issues_issue.py::TestIssue::test_close", "tests/unit/test_issues_issue.py::TestIssue::test_close_with_unicode_labels", "tests/unit/test_issues_issue.py::TestIssue::test_comment", "tests/unit/test_issues_issue.py::TestIssue::test_comment_positive_id", "tests/unit/test_issues_issue.py::TestIssue::test_create_comment", "tests/unit/test_issues_issue.py::TestIssue::test_create_comment_required_body", "tests/unit/test_issues_issue.py::TestIssue::test_create_lock", "tests/unit/test_issues_issue.py::TestIssue::test_edit", "tests/unit/test_issues_issue.py::TestIssue::test_edit_milestone", "tests/unit/test_issues_issue.py::TestIssue::test_edit_multiple_assignees", "tests/unit/test_issues_issue.py::TestIssue::test_edit_no_parameters", "tests/unit/test_issues_issue.py::TestIssue::test_enterprise", "tests/unit/test_issues_issue.py::TestIssue::test_equality", "tests/unit/test_issues_issue.py::TestIssue::test_is_closed", "tests/unit/test_issues_issue.py::TestIssue::test_issue_137", "tests/unit/test_issues_issue.py::TestIssue::test_pull_request", "tests/unit/test_issues_issue.py::TestIssue::test_pull_request_without_urls", "tests/unit/test_issues_issue.py::TestIssue::test_remove_all_labels", "tests/unit/test_issues_issue.py::TestIssue::test_remove_label", "tests/unit/test_issues_issue.py::TestIssue::test_remove_lock", "tests/unit/test_issues_issue.py::TestIssue::test_reopen", "tests/unit/test_issues_issue.py::TestIssue::test_replace_labels", "tests/unit/test_issues_issue.py::TestIssueIterators::test_comments", "tests/unit/test_issues_issue.py::TestIssueIterators::test_events", "tests/unit/test_issues_issue.py::TestIssueIterators::test_labels", 
"tests/unit/test_issues_issue.py::TestLabelRequiresAuth::test_delete", "tests/unit/test_issues_issue.py::TestLabelRequiresAuth::test_update", "tests/unit/test_issues_issue.py::TestLabel::test_delete", "tests/unit/test_issues_issue.py::TestLabel::test_equality", "tests/unit/test_issues_issue.py::TestLabel::test_repr", "tests/unit/test_issues_issue.py::TestLabel::test_str", "tests/unit/test_issues_issue.py::TestLabel::test_update", "tests/unit/test_issues_issue.py::TestLabel::test_update_without_description", "tests/unit/test_issues_issue.py::TestIssueEvent::test_assignee", "tests/unit/test_issues_issue.py::TestIssueEvent::test_created_at", "tests/unit/test_issues_issue.py::TestIssueEvent::test_equality", "tests/unit/test_issues_issue.py::TestIssueEvent::test_repr" ]
[]
BSD 3-Clause "New" or "Revised" License
2,862
[ "src/github3/issues/issue.py" ]
[ "src/github3/issues/issue.py" ]
python-cmd2__cmd2-494
bc559df2afcc51d1804e5d068d7e2c57bc4f72af
2018-08-03 00:54:21
60a212c1c585f0c4c06ffcfeb9882520af8dbf35
diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bda144e..fcc394f8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,10 @@ * Bug Fixes * Fixed bug where ``preparse`` wasn't getting called * Enhancements - * Improved implementation of lifecycle hooks to to support a plugin + * Improved implementation of lifecycle hooks to support a plugin framework, see ``docs/hooks.rst`` for details. * New dependency on ``attrs`` third party module + * Added ``matches_sorted`` member to support custom sorting of tab-completion matches * Deprecations * Deprecated the following hook methods, see ``hooks.rst`` for full details: * ``cmd2.Cmd.preparse()`` - equivilent functionality available @@ -14,6 +15,10 @@ * ``cmd2.Cmd.postparsing_postcmd()`` - equivilent functionality available via ``cmd2.Cmd.register_postcmd_hook()`` +## 0.8.9 (August TBD, 2018) +* Bug Fixes + * Fixed extra slash that could print when tab completing users on Windows + ## 0.9.3 (July 12, 2018) * Bug Fixes * Fixed bug when StatementParser ``__init__()`` was called with ``terminators`` equal to ``None`` diff --git a/README.md b/README.md index a60f0a18..b2eb314c 100755 --- a/README.md +++ b/README.md @@ -34,7 +34,6 @@ Main Features - Ability to load commands at startup from an initialization script - Settable environment parameters - Parsing commands with arguments using `argparse`, including support for sub-commands -- Sub-menu support via the ``AddSubmenu`` decorator - Unicode character support - Good tab-completion of commands, sub-commands, file system paths, and shell commands - Support for Python 3.4+ on Windows, macOS, and Linux @@ -58,7 +57,7 @@ pip install -U cmd2 ``` cmd2 works with Python 3.4+ on Windows, macOS, and Linux. 
It is pure Python code with -the only 3rd-party dependencies being on [attrs](https://github.com/python-attrs/attrs), +the only 3rd-party dependencies being on [attrs](https://github.com/python-attrs/attrs), [colorama](https://github.com/tartley/colorama), and [pyperclip](https://github.com/asweigart/pyperclip). Windows has an additional dependency on [pyreadline](https://pypi.python.org/pypi/pyreadline). Non-Windows platforms have an additional dependency on [wcwidth](https://pypi.python.org/pypi/wcwidth). Finally, Python diff --git a/cmd2/argparse_completer.py b/cmd2/argparse_completer.py index 60af25de..1479a6bf 100755 --- a/cmd2/argparse_completer.py +++ b/cmd2/argparse_completer.py @@ -492,6 +492,7 @@ class AutoCompleter(object): self._cmd2_app.completion_header = header self._cmd2_app.display_matches = completions_with_desc + self._cmd2_app.matches_sorted = True return completions diff --git a/cmd2/cmd2.py b/cmd2/cmd2.py index d34e7161..7273286b 100644 --- a/cmd2/cmd2.py +++ b/cmd2/cmd2.py @@ -369,7 +369,7 @@ class Cmd(cmd.Cmd): except AttributeError: pass - # initialize plugin system + # initialize plugin system # needs to be done before we call __init__(0) self._initialize_plugin_system() @@ -482,11 +482,11 @@ class Cmd(cmd.Cmd): # in reset_completion_defaults() and it is up to completer functions to set them before returning results. 
############################################################################################################ - # If true and a single match is returned to complete(), then a space will be appended + # If True and a single match is returned to complete(), then a space will be appended # if the match appears at the end of the line self.allow_appended_space = True - # If true and a single match is returned to complete(), then a closing quote + # If True and a single match is returned to complete(), then a closing quote # will be added if there is an unmatched opening quote self.allow_closing_quote = True @@ -504,6 +504,10 @@ class Cmd(cmd.Cmd): # quote matches that are completed in a delimited fashion self.matches_delimited = False + # Set to True before returning matches to complete() in cases where matches are sorted with custom ordering. + # If False, then complete() will sort the matches alphabetically before they are displayed. + self.matches_sorted = False + # Set the pager(s) for use with the ppaged() method for displaying output using a pager if sys.platform.startswith('win'): self.pager = self.pager_chop = 'more' @@ -678,6 +682,7 @@ class Cmd(cmd.Cmd): self.completion_header = '' self.display_matches = [] self.matches_delimited = False + self.matches_sorted = False if rl_type == RlType.GNU: readline.set_completion_display_matches_hook(self._display_matches_gnu_readline) @@ -994,12 +999,15 @@ class Cmd(cmd.Cmd): users = [] # Windows lacks the pwd module so we can't get a list of users. - # Instead we will add a slash once the user enters text that + # Instead we will return a result once the user enters text that # resolves to an existing home directory. 
if sys.platform.startswith('win'): expanded_path = os.path.expanduser(text) if os.path.isdir(expanded_path): - users.append(text + os.path.sep) + user = text + if add_trailing_sep_if_dir: + user += os.path.sep + users.append(user) else: import pwd @@ -1083,6 +1091,10 @@ class Cmd(cmd.Cmd): self.allow_appended_space = False self.allow_closing_quote = False + # Sort the matches before any trailing slashes are added + matches = utils.alphabetical_sort(matches) + self.matches_sorted = True + # Build display_matches and add a slash to directories for index, cur_match in enumerate(matches): @@ -1446,11 +1458,8 @@ class Cmd(cmd.Cmd): if self.completion_matches: # Eliminate duplicates - matches_set = set(self.completion_matches) - self.completion_matches = list(matches_set) - - display_matches_set = set(self.display_matches) - self.display_matches = list(display_matches_set) + self.completion_matches = utils.remove_duplicates(self.completion_matches) + self.display_matches = utils.remove_duplicates(self.display_matches) if not self.display_matches: # Since self.display_matches is empty, set it to self.completion_matches @@ -1521,10 +1530,11 @@ class Cmd(cmd.Cmd): self.completion_matches[0] += str_to_append - # Otherwise sort matches - elif self.completion_matches: - self.completion_matches.sort() - self.display_matches.sort() + # Sort matches alphabetically if they haven't already been sorted + if not self.matches_sorted: + self.completion_matches = utils.alphabetical_sort(self.completion_matches) + self.display_matches = utils.alphabetical_sort(self.display_matches) + self.matches_sorted = True try: return self.completion_matches[state] @@ -2270,7 +2280,7 @@ Usage: Usage: unalias [-a] name [name ...] else: # Get rid of duplicates - arglist = list(set(arglist)) + arglist = utils.remove_duplicates(arglist) for cur_arg in arglist: if cur_arg in self.aliases: @@ -2315,12 +2325,10 @@ Usage: Usage: unalias [-a] name [name ...] 
"""Show a list of commands which help can be displayed for. """ # Get a sorted list of help topics - help_topics = self.get_help_topics() - help_topics.sort() + help_topics = utils.alphabetical_sort(self.get_help_topics()) # Get a sorted list of visible command names - visible_commands = self.get_visible_commands() - visible_commands.sort() + visible_commands = utils.alphabetical_sort(self.get_visible_commands()) cmds_doc = [] cmds_undoc = [] diff --git a/cmd2/utils.py b/cmd2/utils.py index d03e7f6f..02956f6b 100644 --- a/cmd2/utils.py +++ b/cmd2/utils.py @@ -5,6 +5,7 @@ import collections import os from typing import Any, List, Optional, Union +import unicodedata from . import constants @@ -110,7 +111,7 @@ def which(editor: str) -> Optional[str]: def is_text_file(file_path: str) -> bool: - """Returns if a file contains only ASCII or UTF-8 encoded text + """Returns if a file contains only ASCII or UTF-8 encoded text. :param file_path: path to the file being checked :return: True if the file is a text file, False if it is binary. @@ -144,3 +145,34 @@ def is_text_file(file_path: str) -> bool: pass return valid_text_file + + +def remove_duplicates(list_to_prune: List) -> List: + """Removes duplicates from a list while preserving order of the items. + + :param list_to_prune: the list being pruned of duplicates + :return: The pruned list + """ + temp_dict = collections.OrderedDict() + for item in list_to_prune: + temp_dict[item] = None + + return list(temp_dict.keys()) + + +def norm_fold(astr: str) -> str: + """Normalize and casefold Unicode strings for saner comparisons. + + :param astr: input unicode string + :return: a normalized and case-folded version of the input string + """ + return unicodedata.normalize('NFC', astr).casefold() + + +def alphabetical_sort(list_to_sort: List[str]) -> List[str]: + """Sorts a list of strings alphabetically. 
+ + :param list_to_sort: the list being sorted + :return: the sorted list + """ + return sorted(list_to_sort, key=norm_fold) diff --git a/examples/tab_autocompletion.py b/examples/tab_autocompletion.py index 342cfff5..38972358 100755 --- a/examples/tab_autocompletion.py +++ b/examples/tab_autocompletion.py @@ -38,7 +38,7 @@ class TabCompleteExample(cmd2.Cmd): static_list_directors = ['J. J. Abrams', 'Irvin Kershner', 'George Lucas', 'Richard Marquand', 'Rian Johnson', 'Gareth Edwards'] USER_MOVIE_LIBRARY = ['ROGUE1', 'SW_EP04', 'SW_EP05'] - MOVIE_DATABASE_IDS = ['SW_EP01', 'SW_EP02', 'SW_EP03', 'ROGUE1', 'SW_EP04', + MOVIE_DATABASE_IDS = ['SW_EP1', 'SW_EP02', 'SW_EP03', 'ROGUE1', 'SW_EP04', 'SW_EP05', 'SW_EP06', 'SW_EP07', 'SW_EP08', 'SW_EP09'] MOVIE_DATABASE = {'SW_EP04': {'title': 'Star Wars: Episode IV - A New Hope', 'rating': 'PG', @@ -52,13 +52,13 @@ class TabCompleteExample(cmd2.Cmd): 'actor': ['Mark Hamill', 'Harrison Ford', 'Carrie Fisher', 'Alec Guinness', 'Peter Mayhew', 'Anthony Daniels'] }, - 'SW_EP06': {'title': 'Star Wars: Episode IV - A New Hope', + 'SW_EP06': {'title': 'Star Wars: Episode VI - Return of the Jedi', 'rating': 'PG', 'director': ['Richard Marquand'], 'actor': ['Mark Hamill', 'Harrison Ford', 'Carrie Fisher', 'Alec Guinness', 'Peter Mayhew', 'Anthony Daniels'] }, - 'SW_EP01': {'title': 'Star Wars: Episode I - The Phantom Menace', + 'SW_EP1': {'title': 'Star Wars: Episode I - The Phantom Menace', 'rating': 'PG', 'director': ['George Lucas'], 'actor': ['Liam Neeson', 'Ewan McGregor', 'Natalie Portman', 'Jake Lloyd'] @@ -113,8 +113,10 @@ class TabCompleteExample(cmd2.Cmd): """Demonstrates showing tabular hinting of tab completion information""" completions_with_desc = [] - for movie_id, movie_entry in self.MOVIE_DATABASE.items(): - completions_with_desc.append(argparse_completer.CompletionItem(movie_id, movie_entry['title'])) + for movie_id in self.MOVIE_DATABASE_IDS: + if movie_id in self.MOVIE_DATABASE: + movie_entry = 
self.MOVIE_DATABASE[movie_id] + completions_with_desc.append(argparse_completer.CompletionItem(movie_id, movie_entry['title'])) return completions_with_desc
Add ability to custom sort tab-completion matches Currently the `complete()` method sorts tab-completion matches in all cases where there is more than one match. There are cases where a user has a custom sort order and already returns the matches to `complete()` in that preferred order. Add a flag like `self.matches_already_sorted` that overrides the sorting in `complete()`.
python-cmd2/cmd2
diff --git a/tests/test_completion.py b/tests/test_completion.py index 2faa4a08..00a120cc 100644 --- a/tests/test_completion.py +++ b/tests/test_completion.py @@ -14,6 +14,7 @@ import sys import pytest import cmd2 +from cmd2 import utils from .conftest import complete_tester, StdOut from examples.subcommands import SubcommandsExample @@ -251,7 +252,7 @@ def test_path_completion_multiple(cmd2_app, request): endidx = len(line) begidx = endidx - len(text) - matches = sorted(cmd2_app.path_complete(text, line, begidx, endidx)) + matches = cmd2_app.path_complete(text, line, begidx, endidx) expected = [text + 'cript.py', text + 'cript.txt', text + 'cripts' + os.path.sep] assert matches == expected @@ -408,9 +409,8 @@ def test_delimiter_completion(cmd2_app): cmd2_app.delimiter_complete(text, line, begidx, endidx, delimited_strs, '/') # Remove duplicates from display_matches and sort it. This is typically done in complete(). - display_set = set(cmd2_app.display_matches) - display_list = list(display_set) - display_list.sort() + display_list = utils.remove_duplicates(cmd2_app.display_matches) + display_list = utils.alphabetical_sort(display_list) assert display_list == ['other user', 'user'] diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 00000000..61fd8373 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,52 @@ +# coding=utf-8 +""" +Unit testing for cmd2/utils.py module. + +Copyright 2018 Todd Leonhardt <[email protected]> +Released under MIT license, see LICENSE file +""" +from colorama import Fore +import cmd2.utils as cu + +HELLO_WORLD = 'Hello, world!' 
+ + +def test_strip_ansi(): + base_str = HELLO_WORLD + ansi_str = Fore.GREEN + base_str + Fore.RESET + assert base_str != ansi_str + assert base_str == cu.strip_ansi(ansi_str) + +def test_strip_quotes_no_quotes(): + base_str = HELLO_WORLD + stripped = cu.strip_quotes(base_str) + assert base_str == stripped + +def test_strip_quotes_with_quotes(): + base_str = '"' + HELLO_WORLD + '"' + stripped = cu.strip_quotes(base_str) + assert stripped == HELLO_WORLD + +def test_remove_duplicates_no_duplicates(): + no_dups = [5, 4, 3, 2, 1] + assert cu.remove_duplicates(no_dups) == no_dups + +def test_remove_duplicates_with_duplicates(): + duplicates = [1, 1, 2, 3, 9, 9, 7, 8] + assert cu.remove_duplicates(duplicates) == [1, 2, 3, 9, 7, 8] + +def test_unicode_normalization(): + s1 = 'café' + s2 = 'cafe\u0301' + assert s1 != s2 + assert cu.norm_fold(s1) == cu.norm_fold(s2) + +def test_unicode_casefold(): + micro = 'µ' + micro_cf = micro.casefold() + assert micro != micro_cf + assert cu.norm_fold(micro) == cu.norm_fold(micro_cf) + +def test_alphabetical_sort(): + my_list = ['café', 'µ', 'A' , 'micro', 'unity', 'cafeteria'] + assert cu.alphabetical_sort(my_list) == ['A', 'cafeteria', 'café', 'micro', 'unity', 'µ']
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 6 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-mock" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 anyio==4.9.0 astroid==3.3.9 attrs==25.3.0 babel==2.17.0 backports.tarfile==1.2.0 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 click==8.1.8 -e git+https://github.com/python-cmd2/cmd2.git@bc559df2afcc51d1804e5d068d7e2c57bc4f72af#egg=cmd2 codecov==2.1.13 colorama==0.4.6 coverage==7.8.0 cryptography==44.0.2 dill==0.3.9 distlib==0.3.9 docutils==0.21.2 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work filelock==3.18.0 h11==0.14.0 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work invoke==2.2.0 isort==6.0.1 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 mccabe==0.7.0 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging @ file:///croot/packaging_1734472117206/work platformdirs==4.3.7 pluggy @ file:///croot/pluggy_1733169602837/work pycparser==2.22 Pygments==2.19.1 pylint==3.3.6 pyperclip==1.9.0 pyproject-api==1.9.0 pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 pytest-mock==3.14.0 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 sniffio==1.3.1 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinx-autobuild==2024.10.3 sphinx-rtd-theme==3.0.2 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 starlette==0.46.1 tomli==2.2.1 tomlkit==0.13.2 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 uvicorn==0.34.0 virtualenv==20.29.3 watchfiles==1.0.4 wcwidth==0.2.13 websockets==15.0.1 zipp==3.21.0
name: cmd2 channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - anyio==4.9.0 - astroid==3.3.9 - attrs==25.3.0 - babel==2.17.0 - backports-tarfile==1.2.0 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - click==8.1.8 - codecov==2.1.13 - colorama==0.4.6 - coverage==7.8.0 - cryptography==44.0.2 - dill==0.3.9 - distlib==0.3.9 - docutils==0.21.2 - filelock==3.18.0 - h11==0.14.0 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - invoke==2.2.0 - isort==6.0.1 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mccabe==0.7.0 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - platformdirs==4.3.7 - pycparser==2.22 - pygments==2.19.1 - pylint==3.3.6 - pyperclip==1.9.0 - pyproject-api==1.9.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - sniffio==1.3.1 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinx-autobuild==2024.10.3 - sphinx-rtd-theme==3.0.2 - 
sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jquery==4.1 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - starlette==0.46.1 - tomli==2.2.1 - tomlkit==0.13.2 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - uvicorn==0.34.0 - virtualenv==20.29.3 - watchfiles==1.0.4 - wcwidth==0.2.13 - websockets==15.0.1 - zipp==3.21.0 prefix: /opt/conda/envs/cmd2
[ "tests/test_completion.py::test_path_completion_multiple", "tests/test_completion.py::test_delimiter_completion", "tests/test_utils.py::test_remove_duplicates_no_duplicates", "tests/test_utils.py::test_remove_duplicates_with_duplicates", "tests/test_utils.py::test_unicode_normalization", "tests/test_utils.py::test_unicode_casefold", "tests/test_utils.py::test_alphabetical_sort" ]
[]
[ "tests/test_completion.py::test_cmd2_command_completion_single", "tests/test_completion.py::test_complete_command_single", "tests/test_completion.py::test_complete_empty_arg", "tests/test_completion.py::test_complete_bogus_command", "tests/test_completion.py::test_cmd2_command_completion_multiple", "tests/test_completion.py::test_cmd2_command_completion_nomatch", "tests/test_completion.py::test_cmd2_help_completion_single", "tests/test_completion.py::test_cmd2_help_completion_multiple", "tests/test_completion.py::test_cmd2_help_completion_nomatch", "tests/test_completion.py::test_shell_command_completion_shortcut", "tests/test_completion.py::test_shell_command_completion_doesnt_match_wildcards", "tests/test_completion.py::test_shell_command_completion_multiple", "tests/test_completion.py::test_shell_command_completion_nomatch", "tests/test_completion.py::test_shell_command_completion_doesnt_complete_when_just_shell", "tests/test_completion.py::test_shell_command_completion_does_path_completion_when_after_command", "tests/test_completion.py::test_path_completion_single_end", "tests/test_completion.py::test_path_completion_nomatch", "tests/test_completion.py::test_default_to_shell_completion", "tests/test_completion.py::test_path_completion_cwd", "tests/test_completion.py::test_path_completion_doesnt_match_wildcards", "tests/test_completion.py::test_path_completion_complete_user", "tests/test_completion.py::test_path_completion_user_path_expansion", "tests/test_completion.py::test_path_completion_directories_only", "tests/test_completion.py::test_basic_completion_single", "tests/test_completion.py::test_basic_completion_multiple", "tests/test_completion.py::test_basic_completion_nomatch", "tests/test_completion.py::test_flag_based_completion_single", "tests/test_completion.py::test_flag_based_completion_multiple", "tests/test_completion.py::test_flag_based_completion_nomatch", "tests/test_completion.py::test_flag_based_default_completer", 
"tests/test_completion.py::test_flag_based_callable_completer", "tests/test_completion.py::test_index_based_completion_single", "tests/test_completion.py::test_index_based_completion_multiple", "tests/test_completion.py::test_index_based_completion_nomatch", "tests/test_completion.py::test_index_based_default_completer", "tests/test_completion.py::test_index_based_callable_completer", "tests/test_completion.py::test_tokens_for_completion_quoted", "tests/test_completion.py::test_tokens_for_completion_unclosed_quote", "tests/test_completion.py::test_tokens_for_completion_redirect", "tests/test_completion.py::test_tokens_for_completion_quoted_redirect", "tests/test_completion.py::test_tokens_for_completion_redirect_off", "tests/test_completion.py::test_add_opening_quote_basic_no_text", "tests/test_completion.py::test_add_opening_quote_basic_nothing_added", "tests/test_completion.py::test_add_opening_quote_basic_quote_added", "tests/test_completion.py::test_add_opening_quote_basic_text_is_common_prefix", "tests/test_completion.py::test_add_opening_quote_delimited_no_text", "tests/test_completion.py::test_add_opening_quote_delimited_nothing_added", "tests/test_completion.py::test_add_opening_quote_delimited_quote_added", "tests/test_completion.py::test_add_opening_quote_delimited_text_is_common_prefix", "tests/test_completion.py::test_add_opening_quote_delimited_space_in_prefix", "tests/test_completion.py::test_cmd2_subcommand_completion_single_end", "tests/test_completion.py::test_cmd2_subcommand_completion_multiple", "tests/test_completion.py::test_cmd2_subcommand_completion_nomatch", "tests/test_completion.py::test_cmd2_help_subcommand_completion_single", "tests/test_completion.py::test_cmd2_help_subcommand_completion_multiple", "tests/test_completion.py::test_cmd2_help_subcommand_completion_nomatch", "tests/test_completion.py::test_subcommand_tab_completion", "tests/test_completion.py::test_subcommand_tab_completion_with_no_completer", 
"tests/test_completion.py::test_subcommand_tab_completion_space_in_text", "tests/test_completion.py::test_cmd2_subcmd_with_unknown_completion_single_end", "tests/test_completion.py::test_cmd2_subcmd_with_unknown_completion_multiple", "tests/test_completion.py::test_cmd2_subcmd_with_unknown_completion_nomatch", "tests/test_completion.py::test_cmd2_help_subcommand_completion_single_scu", "tests/test_completion.py::test_cmd2_help_subcommand_completion_multiple_scu", "tests/test_completion.py::test_cmd2_help_subcommand_completion_nomatch_scu", "tests/test_completion.py::test_subcommand_tab_completion_scu", "tests/test_completion.py::test_subcommand_tab_completion_with_no_completer_scu", "tests/test_completion.py::test_subcommand_tab_completion_space_in_text_scu", "tests/test_utils.py::test_strip_ansi", "tests/test_utils.py::test_strip_quotes_no_quotes", "tests/test_utils.py::test_strip_quotes_with_quotes" ]
[]
MIT License
2,863
[ "cmd2/cmd2.py", "examples/tab_autocompletion.py", "CHANGELOG.md", "README.md", "cmd2/argparse_completer.py", "cmd2/utils.py" ]
[ "cmd2/cmd2.py", "examples/tab_autocompletion.py", "CHANGELOG.md", "README.md", "cmd2/argparse_completer.py", "cmd2/utils.py" ]
python-metar__python-metar-43
6b5dcc358c1a7b2c46679e83d7ef5f3af25e4f2e
2018-08-03 02:30:55
94a48ea3b965ed1b38c5ab52553dd4cbcc23867c
diff --git a/metar/Metar.py b/metar/Metar.py index 0e773cb..338e172 100755 --- a/metar/Metar.py +++ b/metar/Metar.py @@ -587,7 +587,7 @@ class Metar(object): self.vis_dir = direction(vis_dir) self.vis = distance(vis_dist, vis_units, vis_less) - def _handleRunway( self, d ): + def _handleRunway(self, d): """ Parse a runway visual range group. @@ -596,15 +596,17 @@ class Metar(object): . name [string] . low [distance] . high [distance] - """ - if d['name']: - name = d['name'] - low = distance(d['low']) - if d['high']: - high = distance(d['high']) - else: - high = low - self.runway.append((name,low,high)) + . unit [string] + """ + if d['name'] is None: + return + unit = d['unit'] if d['unit'] is not None else 'FT' + low = distance(d['low'], unit) + if d['high'] is None: + high = low + else: + high = distance(d['high'], unit) + self.runway.append([d['name'], low, high, unit]) def _handleWeather( self, d ): """ @@ -1119,16 +1121,23 @@ class Metar(object): text += "; %s" % self.max_vis.string(units) return text - def runway_visual_range( self, units=None ): + def runway_visual_range(self, units=None): """ Return a textual description of the runway visual range. """ lines = [] - for name,low,high in self.runway: + for name, low, high, unit in self.runway: + reportunits = unit if units is None else units if low != high: - lines.append("on runway %s, from %d to %s" % (name, low.value(units), high.string(units))) + lines.append( + ("on runway %s, from %d to %s" + ) % (name, low.value(reportunits), + high.string(reportunits)) + ) else: - lines.append("on runway %s, %s" % (name, low.string(units))) + lines.append( + "on runway %s, %s" % (name, low.string(reportunits)) + ) return "; ".join(lines) def present_weather( self ):
Feet vs. Meters This METAR has R28L/2600FT but reduction reads: visual range: on runway 28L, 2600 meters (should read: visual range: on runway 28L, 2600 feet) Reference: METAR KPIT 091955Z COR 22015G25KT 3/4SM R28L/2600FT TSRA OVC010CB 18/16 A2992 RMK SLP045 T01820159
python-metar/python-metar
diff --git a/test/test_metar.py b/test/test_metar.py index 76147e1..e6c8fee 100644 --- a/test/test_metar.py +++ b/test/test_metar.py @@ -17,6 +17,17 @@ class MetarTest(unittest.TestCase): def raisesParserError(self, code): self.assertRaises(Metar.ParserError, Metar.Metar, code ) + def test_issue40_runwayunits(self): + """Check reported units on runway visual range.""" + report = Metar.Metar( + "METAR KPIT 091955Z COR 22015G25KT 3/4SM R28L/2600FT TSRA OVC010CB " + "18/16 A2992 RMK SLP045 T01820159" + ) + res = report.runway_visual_range() + self.assertEquals(res, 'on runway 28L, 2600 feet') + res = report.runway_visual_range('M') + self.assertTrue(res, 'on runway 28L, 792 meters') + def test_010_parseType_default(self): """Check default value of the report type.""" self.assertEqual( Metar.Metar("KEWR").type, "METAR" )
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
1.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/python-metar/python-metar.git@6b5dcc358c1a7b2c46679e83d7ef5f3af25e4f2e#egg=metar more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: python-metar channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 prefix: /opt/conda/envs/python-metar
[ "test/test_metar.py::MetarTest::test_issue40_runwayunits" ]
[]
[ "test/test_metar.py::MetarTest::test_010_parseType_default", "test/test_metar.py::MetarTest::test_011_parseType_legal", "test/test_metar.py::MetarTest::test_020_parseStation_legal", "test/test_metar.py::MetarTest::test_021_parseStation_illegal", "test/test_metar.py::MetarTest::test_030_parseTime_legal", "test/test_metar.py::MetarTest::test_031_parseTime_specify_year", "test/test_metar.py::MetarTest::test_032_parseTime_specify_month", "test/test_metar.py::MetarTest::test_033_parseTime_auto_month", "test/test_metar.py::MetarTest::test_034_parseTime_auto_year", "test/test_metar.py::MetarTest::test_035_parseTime_suppress_auto_month", "test/test_metar.py::MetarTest::test_040_parseModifier_default", "test/test_metar.py::MetarTest::test_041_parseModifier", "test/test_metar.py::MetarTest::test_042_parseModifier_nonstd", "test/test_metar.py::MetarTest::test_043_parseModifier_illegal", "test/test_metar.py::MetarTest::test_140_parseWind", "test/test_metar.py::MetarTest::test_141_parseWind_nonstd", "test/test_metar.py::MetarTest::test_142_parseWind_illegal", "test/test_metar.py::MetarTest::test_150_parseVisibility", "test/test_metar.py::MetarTest::test_151_parseVisibility_direction", "test/test_metar.py::MetarTest::test_152_parseVisibility_with_following_temperature", "test/test_metar.py::MetarTest::test_290_ranway_state", "test/test_metar.py::MetarTest::test_300_parseTrend", "test/test_metar.py::MetarTest::test_310_parse_sky_conditions", "test/test_metar.py::MetarTest::test_not_strict_mode", "test/test_metar.py::MetarTest::test_snowdepth" ]
[]
BSD License
2,864
[ "metar/Metar.py" ]
[ "metar/Metar.py" ]
HECBioSim__Longbow-107
585dc2198b0f3817e4822da13725489585efcf15
2018-08-03 14:42:57
c81fcaccfa7fb2dc147e40970ef806dc6d6b22a4
diff --git a/longbow/schedulers/lsf.py b/longbow/schedulers/lsf.py index ab20689..8992636 100644 --- a/longbow/schedulers/lsf.py +++ b/longbow/schedulers/lsf.py @@ -98,6 +98,10 @@ def prepare(job): jobfile.write("#BSUB -m " + job["lsf-cluster"] + "\n") + if job["memory"] is not "": + + jobfile.write('#BSUB -R "rusage[mem=' + job["memory"] + 'G]"\n') + # Account to charge (if supplied). if job["account"] is not "": diff --git a/longbow/schedulers/pbs.py b/longbow/schedulers/pbs.py index 1f490a5..dd5a1e1 100644 --- a/longbow/schedulers/pbs.py +++ b/longbow/schedulers/pbs.py @@ -131,16 +131,13 @@ def prepare(job): # Number of mpi processes per node. mpiprocs = cpn - # Memory size (used to select nodes with minimum memory). - memory = job["memory"] - tmp = "select=" + nodes + ":ncpus=" + ncpus + ":mpiprocs=" + mpiprocs # If user has specified memory append the flag (not all machines support # this). - if memory is not "": + if job["memory"] is not "": - tmp = tmp + ":mem=" + memory + "gb" + tmp = tmp + ":mem=" + job["memory"] + "gb" # Write the resource requests jobfile.write("#PBS -l " + tmp + "\n") diff --git a/longbow/schedulers/sge.py b/longbow/schedulers/sge.py index a249375..c1acd0f 100644 --- a/longbow/schedulers/sge.py +++ b/longbow/schedulers/sge.py @@ -102,6 +102,10 @@ def prepare(job): jobfile.write("#$ -l h_rt=" + job["maxtime"] + ":00\n") + if job["memory"] is not "": + + jobfile.write("#$ -l h_vmem=" + job["memory"] + "G\n") + # Email user. 
if job["email-address"] is not "": diff --git a/longbow/schedulers/slurm.py b/longbow/schedulers/slurm.py index 22b8661..752e83a 100644 --- a/longbow/schedulers/slurm.py +++ b/longbow/schedulers/slurm.py @@ -100,6 +100,10 @@ def prepare(job): jobfile.write("#SBATCH " + job["accountflag"] + " " + job["account"] + "\n") + if job["memory"] is not "": + + jobfile.write("#SBATCH --mem=" + job["memory"] + "G" + "\n") + # Generic resource (if supplied) if job["slurm-gres"] is not "": diff --git a/longbow/schedulers/soge.py b/longbow/schedulers/soge.py index a1d0223..9217a95 100644 --- a/longbow/schedulers/soge.py +++ b/longbow/schedulers/soge.py @@ -103,6 +103,10 @@ def prepare(job): jobfile.write("#$ -l h_rt=" + job["maxtime"] + ":00\n") + if job["memory"] is not "": + + jobfile.write("#$ -l h_vmem=" + job["memory"] + "G\n") + # Email user. if job["email-address"] is not "":
memory parameter only used in PBS Either rename this to pbs-memory to denote that it is pbs only or add support in the other schedulers.
HECBioSim/Longbow
diff --git a/tests/standards/lsf_submitfiles/case9.txt b/tests/standards/lsf_submitfiles/case9.txt new file mode 100644 index 0000000..60275d9 --- /dev/null +++ b/tests/standards/lsf_submitfiles/case9.txt @@ -0,0 +1,10 @@ +#!/bin/bash --login +#BSUB -J testjob +#BSUB -q debug +#BSUB -R "rusage[mem=10G]" +#BSUB -W 24:00 +#BSUB -n 24 + +module load amber + +mpiexec.hydra pmemd.MPI -O -i e.in -c e.min -p e.top -o e.out diff --git a/tests/standards/sge_submitfiles/case8.txt b/tests/standards/sge_submitfiles/case8.txt new file mode 100644 index 0000000..f05a1be --- /dev/null +++ b/tests/standards/sge_submitfiles/case8.txt @@ -0,0 +1,9 @@ +#!/bin/bash --login +#$ -cwd -V +#$ -N testjob +#$ -q debug +#$ -l h_rt=24:00:00 +#$ -l h_vmem=10G +module load amber + +mpiexec pmemd.MPI -O -i e.in -c e.min -p e.top -o e.out diff --git a/tests/standards/slurm_submitfiles/case8.txt b/tests/standards/slurm_submitfiles/case8.txt new file mode 100644 index 0000000..bcef936 --- /dev/null +++ b/tests/standards/slurm_submitfiles/case8.txt @@ -0,0 +1,15 @@ +#!/bin/bash --login +#SBATCH -J testjob +#SBATCH -p debug +#SBATCH --mem=10G +#SBATCH --gres=gpu:1 +#SBATCH -n 24 +#SBATCH -N 1 +#SBATCH -t 24:00:00 + +ls /dir +cd /dir + +module load amber + +mpirun pmemd.MPI -O -i e.in -c e.min -p e.top -o e.out diff --git a/tests/standards/soge_submitfiles/case8.txt b/tests/standards/soge_submitfiles/case8.txt new file mode 100644 index 0000000..5166c90 --- /dev/null +++ b/tests/standards/soge_submitfiles/case8.txt @@ -0,0 +1,12 @@ +#!/bin/bash --login +#$ -cwd -V +#$ -N testjob +#$ -q debug +#$ -l h_rt=24:00:00 +#$ -l h_vmem=10G +#$ -l nodes=1 +#$ -pe ib 12 + +module load amber + +mpiexec pmemd.MPI -O -i e.in -c e.min -p e.top -o e.out diff --git a/tests/unit/schedulers_lsf/test_lsf_prepare.py b/tests/unit/schedulers_lsf/test_lsf_prepare.py index 3a4d462..e22c2b9 100644 --- a/tests/unit/schedulers_lsf/test_lsf_prepare.py +++ b/tests/unit/schedulers_lsf/test_lsf_prepare.py @@ -55,6 +55,7 @@ def 
test_prepare_case1(): "localworkdir": "/tmp", "lsf-cluster": "", "maxtime": "24:00", + "memory": "", "modules": "amber", "queue": "debug", "replicates": "1", @@ -89,6 +90,7 @@ def test_prepare_case2(): "localworkdir": "/tmp", "lsf-cluster": "", "maxtime": "24:00", + "memory": "", "modules": "amber", "queue": "debug", "replicates": "5", @@ -121,6 +123,7 @@ def test_prepare_case3(): "localworkdir": "/tmp", "lsf-cluster": "cluster1", "maxtime": "24:00", + "memory": "", "modules": "amber", "queue": "debug", "replicates": "1", @@ -154,6 +157,7 @@ def test_prepare_case4(): "localworkdir": "/tmp", "lsf-cluster": "", "maxtime": "24:00", + "memory": "", "modules": "amber", "queue": "debug", "replicates": "1", @@ -187,6 +191,7 @@ def test_prepare_case5(): "localworkdir": "/tmp", "lsf-cluster": "", "maxtime": "24:00", + "memory": "", "modules": "amber", "queue": "debug", "replicates": "1", @@ -220,6 +225,7 @@ def test_prepare_case6(): "localworkdir": "/tmp", "lsf-cluster": "", "maxtime": "24:00", + "memory": "", "modules": "amber", "queue": "debug", "replicates": "1", @@ -253,6 +259,7 @@ def test_prepare_case7(): "localworkdir": "/tmp", "lsf-cluster": "", "maxtime": "24:00", + "memory": "", "modules": "amber", "queue": "debug", "replicates": "1", @@ -286,6 +293,7 @@ def test_prepare_case8(): "localworkdir": "/tmp", "lsf-cluster": "", "maxtime": "24:00", + "memory": "", "modules": "amber", "queue": "debug", "replicates": "1", @@ -299,3 +307,37 @@ def test_prepare_case8(): os.path.join( os.getcwd(), "tests/standards/lsf_submitfiles/case8.txt"), "rb").read() + + +def test_prepare_case9(): + + """ + Test handler parameters + """ + + job = { + "account": "", + "accountflag": "", + "cores": "24", + "executableargs": "pmemd.MPI -O -i e.in -c e.min -p e.top -o e.out", + "handler": "mpiexec.hydra", + "email-address": "", + "email-flags": "", + "jobname": "testjob", + "localworkdir": "/tmp", + "lsf-cluster": "", + "maxtime": "24:00", + "memory": "10", + "modules": "amber", + "queue": 
"debug", + "replicates": "1", + "scripts": "", + "upload-include": "file1, file2" + } + + prepare(job) + + assert open("/tmp/submit.lsf", "rb").read() == open( + os.path.join( + os.getcwd(), + "tests/standards/lsf_submitfiles/case9.txt"), "rb").read() diff --git a/tests/unit/schedulers_sge/test_sge_prepare.py b/tests/unit/schedulers_sge/test_sge_prepare.py index f08aac8..a462308 100644 --- a/tests/unit/schedulers_sge/test_sge_prepare.py +++ b/tests/unit/schedulers_sge/test_sge_prepare.py @@ -295,3 +295,40 @@ def test_prepare_case7(): os.path.join( os.getcwd(), "tests/standards/sge_submitfiles/case7.txt"), "rb").read() + + +def test_prepare_case8(): + + """ + Test memory param + """ + + job = { + "account": "", + "accountflag": "", + "cluster": "", + "cores": "1", + "corespernode": "", + "executableargs": "pmemd.MPI -O -i e.in -c e.min -p e.top -o e.out", + "handler": "mpiexec", + "email-address": "", + "email-flags": "", + "jobname": "testjob", + "localworkdir": "/tmp", + "maxtime": "24:00", + "memory": "10", + "modules": "amber", + "queue": "debug", + "replicates": "1", + "scripts": "", + "sge-peflag": "mpi", + "sge-peoverride": "false", + "upload-include": "file1, file2" + } + + prepare(job) + + assert open("/tmp/submit.sge", "rb").read() == open( + os.path.join( + os.getcwd(), + "tests/standards/sge_submitfiles/case8.txt"), "rb").read() diff --git a/tests/unit/schedulers_slurm/test_slurm_prepare.py b/tests/unit/schedulers_slurm/test_slurm_prepare.py index 9219865..f40bebe 100644 --- a/tests/unit/schedulers_slurm/test_slurm_prepare.py +++ b/tests/unit/schedulers_slurm/test_slurm_prepare.py @@ -302,3 +302,41 @@ def test_prepare_case7(): os.path.join( os.getcwd(), "tests/standards/slurm_submitfiles/case7.txt"), "rb").read() + + +def test_prepare_case8(): + + """ + Test gres parameters + """ + + job = { + "account": "", + "accountflag": "", + "cluster": "", + "cores": "24", + "corespernode": "24", + "executableargs": "pmemd.MPI -O -i e.in -c e.min -p e.top -o 
e.out", + "handler": "mpirun", + "email-address": "", + "email-flags": "", + "jobname": "testjob", + "localworkdir": "/tmp", + "maxtime": "24:00", + "memory": "10", + "modules": "amber", + "queue": "debug", + "replicates": "1", + "scripts": "ls /dir, cd /dir", + "slurm-gres": "gpu:1", + "sge-peflag": "mpi", + "sge-peoverride": "false", + "upload-include": "file1, file2" + } + + prepare(job) + + assert open("/tmp/submit.slurm", "rb").read() == open( + os.path.join( + os.getcwd(), + "tests/standards/slurm_submitfiles/case8.txt"), "rb").read() diff --git a/tests/unit/schedulers_soge/test_soge_prepare.py b/tests/unit/schedulers_soge/test_soge_prepare.py index e8666c8..dbf9615 100644 --- a/tests/unit/schedulers_soge/test_soge_prepare.py +++ b/tests/unit/schedulers_soge/test_soge_prepare.py @@ -295,3 +295,40 @@ def test_prepare_case7(): os.path.join( os.getcwd(), "tests/standards/soge_submitfiles/case7.txt"), "rb").read() + + +def test_prepare_case8(): + + """ + Test under subscription + """ + + job = { + "account": "", + "accountflag": "", + "cluster": "", + "cores": "12", + "corespernode": "24", + "executableargs": "pmemd.MPI -O -i e.in -c e.min -p e.top -o e.out", + "handler": "mpiexec", + "email-address": "", + "email-flags": "", + "jobname": "testjob", + "localworkdir": "/tmp", + "maxtime": "24:00", + "memory": "10", + "modules": "amber", + "queue": "debug", + "replicates": "1", + "scripts": "", + "sge-peflag": "mpi", + "sge-peoverride": "false", + "upload-include": "file1, file2" + } + + prepare(job) + + assert open("/tmp/submit.soge", "rb").read() == open( + os.path.join( + os.getcwd(), + "tests/standards/soge_submitfiles/case8.txt"), "rb").read()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 5 }
.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "coverage", "coveralls" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 coveralls==3.3.1 docopt==0.6.2 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/HECBioSim/Longbow.git@585dc2198b0f3817e4822da13725489585efcf15#egg=Longbow packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 requests==2.27.1 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: Longbow channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - coverage==6.2 - coveralls==3.3.1 - docopt==0.6.2 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - requests==2.27.1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/Longbow
[ "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case9", "tests/unit/schedulers_sge/test_sge_prepare.py::test_prepare_case8", "tests/unit/schedulers_slurm/test_slurm_prepare.py::test_prepare_case8", "tests/unit/schedulers_soge/test_soge_prepare.py::test_prepare_case8" ]
[]
[ "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case1", "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case2", "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case3", "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case4", "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case5", "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case6", "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case7", "tests/unit/schedulers_lsf/test_lsf_prepare.py::test_prepare_case8", "tests/unit/schedulers_sge/test_sge_prepare.py::test_prepare_case1", "tests/unit/schedulers_sge/test_sge_prepare.py::test_prepare_case2", "tests/unit/schedulers_sge/test_sge_prepare.py::test_prepare_case3", "tests/unit/schedulers_sge/test_sge_prepare.py::test_prepare_case4", "tests/unit/schedulers_sge/test_sge_prepare.py::test_prepare_case5", "tests/unit/schedulers_sge/test_sge_prepare.py::test_prepare_case6", "tests/unit/schedulers_sge/test_sge_prepare.py::test_prepare_case7", "tests/unit/schedulers_slurm/test_slurm_prepare.py::test_prepare_case1", "tests/unit/schedulers_slurm/test_slurm_prepare.py::test_prepare_case2", "tests/unit/schedulers_slurm/test_slurm_prepare.py::test_prepare_case3", "tests/unit/schedulers_slurm/test_slurm_prepare.py::test_prepare_case4", "tests/unit/schedulers_slurm/test_slurm_prepare.py::test_prepare_case5", "tests/unit/schedulers_slurm/test_slurm_prepare.py::test_prepare_case6", "tests/unit/schedulers_slurm/test_slurm_prepare.py::test_prepare_case7", "tests/unit/schedulers_soge/test_soge_prepare.py::test_prepare_case1", "tests/unit/schedulers_soge/test_soge_prepare.py::test_prepare_case2", "tests/unit/schedulers_soge/test_soge_prepare.py::test_prepare_case3", "tests/unit/schedulers_soge/test_soge_prepare.py::test_prepare_case4", "tests/unit/schedulers_soge/test_soge_prepare.py::test_prepare_case5", "tests/unit/schedulers_soge/test_soge_prepare.py::test_prepare_case6", 
"tests/unit/schedulers_soge/test_soge_prepare.py::test_prepare_case7" ]
[]
BSD 3-Clause License
2,865
[ "longbow/schedulers/sge.py", "longbow/schedulers/slurm.py", "longbow/schedulers/lsf.py", "longbow/schedulers/pbs.py", "longbow/schedulers/soge.py" ]
[ "longbow/schedulers/sge.py", "longbow/schedulers/slurm.py", "longbow/schedulers/lsf.py", "longbow/schedulers/pbs.py", "longbow/schedulers/soge.py" ]
pennmem__cmlreaders-180
2ffd443b75a0095a5c1edaaadf6e6e764c0e8a78
2018-08-03 19:26:06
06f314e3c0ceb982fe72f94e7a636ab1fff06c29
diff --git a/cmlreaders/constants.py b/cmlreaders/constants.py index c4b84e3..3736269 100644 --- a/cmlreaders/constants.py +++ b/cmlreaders/constants.py @@ -100,7 +100,8 @@ rhino_paths = { 'protocols/{protocol}/subjects/{subject}/experiments/{experiment}/sessions/{session}/behavioral/current_processed/ps4_events.json' ], 'sources': [ - 'protocols/{protocol}/subjects/{subject}/experiments/{experiment}/sessions/{session}/ephys/current_processed/sources.json' + "protocols/{protocol}/subjects/{subject}/experiments/{experiment}/sessions/{session}/ephys/current_processed/sources.json", + "data/eeg/{subject}/eeg.noreref/params.txt", ], # Processed EEG data basename diff --git a/cmlreaders/readers/readers.py b/cmlreaders/readers/readers.py index fadd626..ac415ba 100644 --- a/cmlreaders/readers/readers.py +++ b/cmlreaders/readers/readers.py @@ -131,6 +131,10 @@ class EventReader(BaseCMLReader): if self.session is not None: df = df[df["session"] == self.session] + # ensure we have an experiment column + if "experiment" not in df: + df.loc[:, "experiment"] = self.experiment + return df def as_dataframe(self):
Error attempting to load pyFR eeg I can load pyFR events, but I receive an error when attempting to load eeg: ``` reader = CMLReader("TJ039", experiment="pyFR", session=0) events = reader.load("events") word = events[events.type=='WORD'] eeg = reader.load_eeg(events=word, rel_start=-100, rel_stop=100) ... KeyError: 'experiment' ``` Adding an `experiment` column to the DF then yields a `FileNotFoundError`.
pennmem/cmlreaders
diff --git a/cmlreaders/test/test_eeg.py b/cmlreaders/test/test_eeg.py index acfd8fd..1e2e02e 100644 --- a/cmlreaders/test/test_eeg.py +++ b/cmlreaders/test/test_eeg.py @@ -225,13 +225,13 @@ class TestFileReaders: @pytest.mark.rhino class TestEEGReader: - # FIXME: add LTP, pyFR cases - @pytest.mark.parametrize("subject,index,channel", [ - ("R1298E", 87, "CH88"), # Split EEG - ("R1387E", 13, "CH14"), # Ramulator HDF5 + @pytest.mark.parametrize("subject,experiment,index,channel", [ + ("R1298E", "FR1", 87, "CH88"), # Split EEG + ("R1387E", "FR1", 13, "CH14"), # Ramulator HDF5 + ("TJ039", "pyFR", 14, "CH15"), # pyFR ]) - def test_eeg_reader(self, subject, index, channel, rhino_root): - reader = CMLReader(subject=subject, experiment='FR1', session=0, + def test_eeg_reader(self, subject, experiment, index, channel, rhino_root): + reader = CMLReader(subject=subject, experiment=experiment, session=0, rootdir=rhino_root) events = reader.load("events") events = events[events["type"] == "WORD"].iloc[:2] diff --git a/cmlreaders/test/test_readers.py b/cmlreaders/test/test_readers.py index e95211d..e785219 100644 --- a/cmlreaders/test/test_readers.py +++ b/cmlreaders/test/test_readers.py @@ -221,6 +221,7 @@ class TestEventReader: path = datafile(filename) df = EventReader.fromfile(path) assert df.columns[0] == "eegoffset" + assert "experiment" in df.columns assert len(df)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 async-generator==1.10 attrs==22.2.0 Babel==2.11.0 bleach==4.1.0 cached-property==1.5.2 certifi==2021.5.30 charset-normalizer==2.0.12 -e git+https://github.com/pennmem/cmlreaders.git@2ffd443b75a0095a5c1edaaadf6e6e764c0e8a78#egg=cmlreaders codecov==2.1.13 coverage==6.2 cycler==0.11.0 decorator==5.1.1 defusedxml==0.7.1 docutils==0.18.1 entrypoints==0.4 flake8==3.9.2 h5py==3.1.0 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 iniconfig==1.1.1 ipython-genutils==0.2.0 Jinja2==3.0.3 jsonschema==3.2.0 jupyter-client==7.1.2 jupyter-core==4.9.2 jupyterlab-pygments==0.1.2 kiwisolver==1.3.1 MarkupSafe==2.0.1 matplotlib==3.3.4 mccabe==0.6.1 mistune==0.8.4 mne==0.23.4 nbclient==0.5.9 nbconvert==6.0.7 nbformat==5.1.3 nbsphinx==0.8.8 nest-asyncio==1.6.0 numpy==1.19.5 packaging==21.3 pandas==1.1.5 pandocfilters==1.5.1 Pillow==8.4.0 pluggy==1.0.0 py==1.11.0 pycodestyle==2.7.0 pyflakes==2.3.1 Pygments==2.14.0 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==25.1.2 requests==2.27.1 scipy==1.5.4 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinx-rtd-theme==2.0.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 testpath==0.6.0 tomli==1.2.3 tornado==6.1 traitlets==4.3.3 typing_extensions==4.1.1 urllib3==1.26.20 webencodings==0.5.1 zipp==3.6.0
name: cmlreaders channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - async-generator==1.10 - attrs==22.2.0 - babel==2.11.0 - bleach==4.1.0 - cached-property==1.5.2 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - cycler==0.11.0 - decorator==5.1.1 - defusedxml==0.7.1 - docutils==0.18.1 - entrypoints==0.4 - flake8==3.9.2 - h5py==3.1.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - ipython-genutils==0.2.0 - jinja2==3.0.3 - jsonschema==3.2.0 - jupyter-client==7.1.2 - jupyter-core==4.9.2 - jupyterlab-pygments==0.1.2 - kiwisolver==1.3.1 - markupsafe==2.0.1 - matplotlib==3.3.4 - mccabe==0.6.1 - mistune==0.8.4 - mne==0.23.4 - nbclient==0.5.9 - nbconvert==6.0.7 - nbformat==5.1.3 - nbsphinx==0.8.8 - nest-asyncio==1.6.0 - numpy==1.19.5 - packaging==21.3 - pandas==1.1.5 - pandocfilters==1.5.1 - pillow==8.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.7.0 - pyflakes==2.3.1 - pygments==2.14.0 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==25.1.2 - requests==2.27.1 - scipy==1.5.4 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinx-rtd-theme==2.0.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jquery==4.1 - 
sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - testpath==0.6.0 - tomli==1.2.3 - tornado==6.1 - traitlets==4.3.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - webencodings==0.5.1 - zipp==3.6.0 prefix: /opt/conda/envs/cmlreaders
[ "cmlreaders/test/test_readers.py::TestEventReader::test_load_matlab[all_events]", "cmlreaders/test/test_readers.py::TestEventReader::test_load_matlab[task_events]", "cmlreaders/test/test_readers.py::TestEventReader::test_load_matlab[math_events]" ]
[ "cmlreaders/test/test_eeg.py::TestFileReaders::test_split_eeg_reader", "cmlreaders/test/test_eeg.py::TestFileReaders::test_split_eeg_reader_missing_contacts", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader_rhino[R1345D-FR1-0]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader_rhino[R1363T-FR1-0]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader_rhino[R1392N-PAL1-0]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_reader", "cmlreaders/test/test_eeg.py::TestFileReaders::test_ramulator_hdf5_rereference", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader[R1298E-FR1-87-CH88]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader[R1387E-FR1-13-CH14]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader[TJ039-pyFR-14-CH15]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_read_whole_session[R1161E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader_with_events[R1161E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_reader_with_events[R1387E]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1384J-pairs-False-43-LS12-LS1]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1111M-pairs-True-43-LPOG23-LPOG31]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1111M-contacts-True-43-LPOG44]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_rereference[R1286J-contacts-True-43-LJ16]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1384J-ind.region-insula-10-200]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1288P-ind.region-lateralorbitofrontal-5-200]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_filter_channels[R1111M-ind.region-middletemporal-18-100]", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[RamulatorHDF5Reader-True]", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[RamulatorHDF5Reader-False]", 
"cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_multisession[subjects0-experiments0]", "cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_multisession[subjects1-experiments1]", "cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_multisession[subjects2-experiments2]", "cmlreaders/test/test_eeg.py::TestLoadEEG::test_channel_discrepancies[R1387E-catFR5-0-120-125]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1405E-0-0-contacts]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1405E-0-0-pairs]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1006P-0-0-contacts]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1006P-0-0-pairs]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1006P-0-1-contacts]", "cmlreaders/test/test_readers.py::TestMontageReader::test_load[R1006P-0-1-pairs]", "cmlreaders/test/test_readers.py::TestElectrodeCategoriesReader::test_load[R1111M-lens0]", "cmlreaders/test/test_readers.py::TestElectrodeCategoriesReader::test_load[R1052E-lens1]", "cmlreaders/test/test_readers.py::TestClassifierContainerReader::test_as_methods[baseline_classifier-pyobject]", "cmlreaders/test/test_readers.py::TestClassifierContainerReader::test_as_methods[used_classifier-pyobject]", "cmlreaders/test/test_readers.py::TestClassifierContainerReader::test_to_methods[baseline_classifier-binary]", "cmlreaders/test/test_readers.py::TestClassifierContainerReader::test_to_methods[used_classifier-binary]" ]
[ "cmlreaders/test/test_eeg.py::TestEEGMetaReader::test_load[R1389J-sources.json-int16-1641165-1000]", "cmlreaders/test/test_eeg.py::TestEEGMetaReader::test_load[TJ001-TJ001_pyFR_params.txt-int16-None-400.0]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_include_contact[True]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_include_contact[False]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_scheme_type[/cmlreaders/cmlreaders/test/data/contacts.json-contacts]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_scheme_type[/cmlreaders/cmlreaders/test/data/pairs.json-pairs]", "cmlreaders/test/test_eeg.py::TestBaseEEGReader::test_scheme_type[-None]", "cmlreaders/test/test_eeg.py::TestFileReaders::test_npy_reader", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_absolute[TJ001-TJ001_events.mat-expected_basenames0]", "cmlreaders/test/test_eeg.py::TestEEGReader::test_eeg_absolute[R1389J-task_events.json-expected_basenames1]", "cmlreaders/test/test_eeg.py::TestRereference::test_rereference[SplitEEGReader-True]", "cmlreaders/test/test_eeg.py::TestLoadEEG::test_load_with_empty_events", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-voxel_coordinates-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-voxel_coordinates-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-voxel_coordinates-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-leads-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-leads-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-leads-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-classifier_excluded_leads-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-classifier_excluded_leads-recarray]", 
"cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-classifier_excluded_leads-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-good_leads-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-good_leads-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-good_leads-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-jacksheet-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-jacksheet-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-jacksheet-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-area-dataframe]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-area-recarray]", "cmlreaders/test/test_readers.py::TestTextReader::test_as_methods[R1389J-0-area-dict]", "cmlreaders/test/test_readers.py::TestTextReader::test_read_jacksheet", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-voxel_coordinates-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-voxel_coordinates-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-leads-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-leads-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-classifier_excluded_leads-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-classifier_excluded_leads-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-good_leads-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-good_leads-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-jacksheet-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-jacksheet-csv]", 
"cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-area-json]", "cmlreaders/test/test_readers.py::TestTextReader::test_to_methods[R1389J-0-area-csv]", "cmlreaders/test/test_readers.py::TestTextReader::test_failures", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-electrode_coordinates-dataframe]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-electrode_coordinates-recarray]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-electrode_coordinates-dict]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-prior_stim_results-dataframe]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-prior_stim_results-recarray]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-prior_stim_results-dict]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-target_selection_table-dataframe]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-target_selection_table-recarray]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_as_methods[R1409D-0-target_selection_table-dict]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-electrode_coordinates-json]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-electrode_coordinates-csv]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-prior_stim_results-json]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-prior_stim_results-csv]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-target_selection_table-json]", "cmlreaders/test/test_readers.py::TestRAMCSVReader::test_to_methods[R1409D-0-target_selection_table-csv]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_as_methods[R1409D-catFR1-1-event_log-dataframe]", 
"cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_as_methods[R1409D-catFR1-1-event_log-recarray]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_as_methods[R1409D-catFR1-1-event_log-dict]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_to_methods[R1409D-catFR1-1-event_log-json]", "cmlreaders/test/test_readers.py::TestRamulatorEventLogReader::test_to_methods[R1409D-catFR1-1-event_log-csv]", "cmlreaders/test/test_readers.py::TestBaseJSONReader::test_load", "cmlreaders/test/test_readers.py::TestEventReader::test_load_json", "cmlreaders/test/test_readers.py::TestLocalizationReader::test_load", "cmlreaders/test/test_readers.py::test_fromfile[ElectrodeCategoriesReader-/cmlreaders/cmlreaders/test/data/electrode_categories.txt-dict]", "cmlreaders/test/test_readers.py::test_fromfile[MontageReader-/cmlreaders/cmlreaders/test/data/pairs.json-DataFrame]", "cmlreaders/test/test_readers.py::test_fromfile[MontageReader-/cmlreaders/cmlreaders/test/data/contacts.json-DataFrame]", "cmlreaders/test/test_readers.py::test_fromfile[RamulatorEventLogReader-/cmlreaders/cmlreaders/test/data/event_log.json-DataFrame]" ]
[]
null
2,866
[ "cmlreaders/constants.py", "cmlreaders/readers/readers.py" ]
[ "cmlreaders/constants.py", "cmlreaders/readers/readers.py" ]
scieloorg__xylose-159
6bb32ebe34da88381518e84790963315b54db9c8
2018-08-03 20:24:16
6bb32ebe34da88381518e84790963315b54db9c8
diff --git a/xylose/scielodocument.py b/xylose/scielodocument.py index 27af27d..ad6ba0a 100644 --- a/xylose/scielodocument.py +++ b/xylose/scielodocument.py @@ -51,8 +51,8 @@ REPLACE_TAGS_MIXED_CITATION = ( def warn_future_deprecation(old, new, details=''): - warnings.simplefilter("always") - msg = '"{}" will be deprected in future version. Use "{}" instead. {}' + msg = '"{}" will be deprecated in future version. '.format(old) + \ + 'Use "{}" instead. {}'.format(new, details) warnings.warn(msg, PendingDeprecationWarning) @@ -2830,9 +2830,8 @@ class Citation(object): """ It retrieves the analytic institution authors of a reference, no matter the publication type of the reference. - It is not desirable to restrict the conditioned return to the - publication type, because some reference standards are very peculiar - and not only articles or books have institution authors. + It is not desirable to return conditioned to the publication type, + because not only articles or books have institution authors. IT REPLACES analytic_institution """ institutions = [] @@ -2846,13 +2845,14 @@ class Citation(object): """ This method retrieves the institutions in the given citation. The citation must be an article or book citation, if it exists. - IT WILL BE DEPRECATED + IT WILL BE DEPRECATED. Use analytic_institution_authors instead. """ warn_future_deprecation( 'analytic_institution', 'analytic_institution_authors', - 'analytic_institution_authors is more suitable name and ' - 'returns the authors independending on publication type' + 'Changes: ' + '1) analytic_institution_authors is a more suitable name; ' + '2) reconsidered the constrictions related to the publication type' ) institutions = [] if self.publication_type in [u'article', u'book']: @@ -2868,9 +2868,8 @@ class Citation(object): """ It retrieves the monographic institution authors of a reference, no matter the publication type of the reference. 
- It is not desirable to restrict the conditioned return to the - publication type, because some reference standards are very peculiar - and not only books have institution authors. + It is not desirable to return conditioned to the publication type, + because not only books have institution authors. IT REPLACES monographic_institution """ if 'v30' in self.data: @@ -2886,13 +2885,14 @@ class Citation(object): """ This method retrieves the institutions in the given citation. The citation must be a book citation, if it exists. - IT WILL BE DEPRECATED + IT WILL BE DEPRECATED. Use monographic_institution_authors instead. """ warn_future_deprecation( 'monographic_institution', 'monographic_institution_authors', - 'monographic_institution_authors is more suitable name and ' - 'returns the authors independending on publication type' + 'Changes: ' + '1) monographic_institution_authors is a more suitable name; ' + '2) reconsidered the constrictions related to the publication type' ) institutions = [] if self.publication_type == u'book' and 'v17' in self.data: @@ -3056,8 +3056,38 @@ class Citation(object): return self.data['v237'][0]['_'] @property - def authors(self): + def authors_groups(self): + """ + It retrieves all the authors (person and institution) and + identifies their type (analytic or monographic). + IT REPLACES authors which returns only person authors + """ + authors = {} + if self.analytic_authors_group is not None: + authors['analytic'] = self.analytic_authors_group + if self.monographic_authors_group is not None: + authors['monographic'] = self.monographic_authors_group + if len(authors) > 0: + return authors + @property + def authors(self): + """ + This method retrieves the analytic and monographic person authors + of a citation. + IT WILL BE DEPRECATED. 
+ Use authors_groups to retrieve all the authors (person and institution) + and (analytic and monographic) + """ + warn_future_deprecation( + 'authors', + 'author_groups', + 'The atribute "author_groups" returns all the authors ' + '(person and institution) ' + 'identified by their type (analytic or monographic). ' + 'The atribute "authors" returns only person authors and do not ' + 'differs analytic from monographic' + ) aa = self.analytic_authors or [] ma = self.monographic_authors or [] return aa + ma @@ -3067,8 +3097,8 @@ class Citation(object): """ It retrieves the analytic person authors of a reference, no matter the publication type of the reference. - It is not desirable to restrict the conditioned return to the - publication type, because some reference standards are very peculiar + It is not desirable to return conditioned to the + publication type, because some reference standards are peculiar and not only articles or books have person authors. IT REPLACES analytic_authors """ @@ -3084,17 +3114,39 @@ class Citation(object): if len(authors) > 0: return authors + @property + def analytic_authors_group(self): + """ + It retrieves all the analytic authors (person and institution). + IT REPLACES analytic_authors which returns only person authors + """ + analytic = {} + if self.analytic_person_authors is not None: + analytic['person'] = self.analytic_person_authors + if self.analytic_institution_authors is not None: + analytic['institution'] = self.analytic_institution_authors + if len(analytic) > 0: + return analytic + @property def analytic_authors(self): """ - This method retrieves the authors of the given citation. These authors - may correspond to an article, book analytic, link or thesis. - IT WILL BE DEPRECATED. Use analytic_person_authors instead + It retrieves only analytic person authors of a reference of + an article, book chapter, link or thesis. + IT WILL BE DEPRECATED. 
+ To retrieve only analytic person authors, + use analytic_person_authors instead. + To retrieve all analytic authors (person and institution), + use analytic_authors_group instead. """ warn_future_deprecation( - 'analytic_authors', - 'analytic_person_authors', - 'analytic_person_authors is more suitable name' + 'analytic_authors', + 'analytic_person_authors or analytic_authors_group', + 'The attribute "analytic_authors" returns only person authors. ' + 'To retrieve all the analytic authors (person and institution),' + ' use analytic_authors_group. ' + 'To retrieve only the analytic person authors,' + ' use analytic_person_authors. ' ) authors = [] if 'v10' in self.data: @@ -3115,11 +3167,13 @@ class Citation(object): """ It retrieves the monographic person authors of a reference, no matter the publication type of the reference. - It is not desirable to restrict the conditioned return to the - publication type, because some reference standards are very peculiar - and not only articles or books have person authors. + It is not desirable to return conditioned to the + publication type, because some reference standards are peculiar + and not only books have person authors. IT REPLACES monographic_authors """ + if 'v30' in self.data: + return authors = [] for author in self.data.get('v16', []): authordict = {} @@ -3132,18 +3186,38 @@ class Citation(object): if len(authors) > 0: return authors + @property + def monographic_authors_group(self): + """ + It retrieves all the monographic authors (person and institution). + IT REPLACES monographic_authors + """ + monographic = {} + if self.monographic_person_authors is not None: + monographic['person'] = self.monographic_person_authors + if self.monographic_institution_authors is not None: + monographic['institution'] = self.monographic_institution_authors + if len(monographic) > 0: + return monographic + @property def monographic_authors(self): """ - This method retrieves the authors of the given book citation. 
- These authors may - correspond to a book monography citation. - IT WILL BE DEPRECATED. Use monographic_person_authors instead. + It retrieves only monographic person authors of a reference. + IT WILL BE DEPRECATED. + To retrieve only monographic person authors, + use monographic_person_authors instead. + To retrieve all monographic authors (person and institution), + use monographic_authors_group instead. """ warn_future_deprecation( - 'monographic_authors', - 'monographic_person_authors', - 'monographic_person_authors is more suitable name' + 'monographic_authors', + 'monographic_person_authors or monographic_authors_group', + 'The attribute "monographic_authors" returns only person authors. ' + 'To retrieve all the monographic authors (person and institution),' + ' use monographic_authors_group. ' + 'To retrieve only the monographic person authors,' + ' use monographic_person_authors. ' ) authors = [] if 'v16' in self.data: @@ -3159,15 +3233,44 @@ class Citation(object): if len(authors) > 0: return authors + @property + def first_author_info(self): + """ + It retrieves the info of the first author: + (analytic or monographic), (person or institution), author data, + of a citation, independent of citation type. + :returns: (analytic or monographic, person or institution, author data) + IT REPLACES first_author + """ + types = [('analytic', 'person'), + ('analytic', 'institution'), + ('monographic', 'person'), + ('monographic', 'institution'), + ] + authors = [self.analytic_person_authors, + self.analytic_institution_authors, + self.monographic_person_authors, + self.monographic_institution_authors, + ] + for a, a_type in zip(authors, types): + if a is not None: + return a_type[0], a_type[1], a[0] + @property def first_author(self): """ - This property retrieves the first author of the given citation, + It retrieves the first person author of the given citation, independent of citation type. 
- :returns: dict with keys ``given_names`` and ``surname`` + IT WILL BE DEPRECATED. Use first_author_info instead. """ - + warn_future_deprecation( + 'first_author', + 'first_author_info', + 'The attribute "first_author" returns only a person author. ' + 'The attribute "first_author_info" returns info of the ' + 'first author independing if it is person or institution. ' + ) if self.authors: return self.authors[0] elif self.monographic_authors:
[referência] Incluir autores institucionais no retorno de authors, analytic_authors, monographic_authors e first_author authors, analytic_authors, monographic_authors e first_author deveriam retornar autores institucionais, mas não estão. Mudar de nome e indicar obsolescência para que outras aplicações não quebrem com a mudança. ``` @property def authors(self): aa = self.analytic_authors or [] ma = self.monographic_authors or [] return aa + ma ``` ``` @property def analytic_authors(self): """ This method retrieves the authors of the given citation. These authors may correspond to an article, book analytic, link or thesis. """ authors = [] if 'v10' in self.data: for author in self.data['v10']: authordict = {} if 's' in author: authordict['surname'] = html_decode(author['s']) if 'n' in author: authordict['given_names'] = html_decode(author['n']) if 's' in author or 'n' in author: authors.append(authordict) if len(authors) > 0: return authors ``` ``` @property def monographic_authors(self): """ This method retrieves the authors of the given book citation. These authors may correspond to a book monography citation. """ authors = [] if 'v16' in self.data: for author in self.data['v16']: authordict = {} if 's' in author: authordict['surname'] = html_decode(author['s']) if 'n' in author: authordict['given_names'] = html_decode(author['n']) if 's' in author or 'n' in author: authors.append(authordict) if len(authors) > 0: return authors ``` ``` @property def first_author(self): """ This property retrieves the first author of the given citation, independent of citation type. :returns: dict with keys ``given_names`` and ``surname`` """ if self.authors: return self.authors[0] elif self.monographic_authors: return self.monographic_authors[0] ```
scieloorg/xylose
diff --git a/tests/test_document.py b/tests/test_document.py index 46738b9..9b0e572 100644 --- a/tests/test_document.py +++ b/tests/test_document.py @@ -8,6 +8,8 @@ import warnings from xylose.scielodocument import Article, Citation, Journal, Issue, html_decode, UnavailableMetadataException from xylose import tools +warnings.simplefilter("always") + class ToolsTests(unittest.TestCase): @@ -4135,7 +4137,6 @@ class CitationTest(unittest.TestCase): json_citation['v30'] = [{u'_': u'It is the journal title'}] json_citation['v12'] = [{u'_': u'It is the article title'}] - json_citation['v11'] = [{u'_': u'Article Institution'}] json_citation['v11'] = [ {u'_': u'Article Institution'}, {u'_': u'Article Institution 2'}, @@ -4295,6 +4296,16 @@ class CitationTest(unittest.TestCase): self.assertEqual(citation.institutions, None) + def test_pending_deprecation_warning_of_authors(self): + citation = Citation({}) + with warnings.catch_warnings(record=True) as w: + assert citation.authors == [] + assert len(w) == 3 + assert 'authors' in str(w[0].message) + assert 'analytic_authors' in str(w[1].message) + assert 'monographic_authors' in str(w[2].message) + assert issubclass(w[-1].category, PendingDeprecationWarning) + def test_authors_article(self): json_citation = {} @@ -4438,6 +4449,232 @@ class CitationTest(unittest.TestCase): self.assertEqual(citation.analytic_person_authors, expected) + def test_authors_groups_of_a_chapter(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + ] + json_citation['v17'] = [{u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + json_citation['v10'] = [ + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v11'] = [ + {u'_': u'Article Institution 3'}, + 
{u'_': u'Article Institution 4'}, + ] + expected_i = [u'Article Institution', u'Article Institution 2'] + expected_p = [{'surname': u'Sullivan', u'given_names': u'Mike'}, + {'surname': u'Hurricane Carter', + u'given_names': u'Rubin'}, + ] + expected_m = {'person': expected_p, 'institution': expected_i} + + expected_i = [u'Article Institution 3', u'Article Institution 4'] + expected_p = [{'surname': u'Maguila Rodrigues', + u'given_names': u'Adilson'}, + {u'given_names': u'Acelino Popó Freitas'}, + {'surname': u'Zé Marreta'}] + expected_a = {'person': expected_p, 'institution': expected_i} + + citation = Citation(json_citation) + a = citation.authors_groups + self.assertEqual(a['analytic'], expected_a) + self.assertEqual(a['monographic'], expected_m) + self.assertEqual( + citation.authors_groups, + { + 'analytic': expected_a, + 'monographic': expected_m + } + ) + + def test_authors_groups_of_a_journal(self): + json_citation = {} + json_citation['v30'] = [{u'_': u'It is the journal title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + ] + json_citation['v17'] = [{u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + json_citation['v10'] = [ + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v11'] = [ + {u'_': u'Article Institution 3'}, + {u'_': u'Article Institution 4'}, + ] + expected_i = [u'Article Institution 3', u'Article Institution 4'] + expected_p = [{'surname': u'Maguila Rodrigues', + u'given_names': u'Adilson'}, + {u'given_names': u'Acelino Popó Freitas'}, + {'surname': u'Zé Marreta'}] + expected_a = {'person': expected_p, 'institution': expected_i} + + citation = Citation(json_citation) + self.assertEqual( + citation.authors_groups, + { + 'analytic': expected_a, + } + ) + + def test_without_authors_groups(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + 
json_citation['v12'] = [{u'_': u'It is the chapter title'}] + citation = Citation(json_citation) + self.assertEqual(citation.authors_groups, None) + + def test_analytic_authors_group_of_a_book(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v10'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v11'] = [ + {u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + expected_i = [u'Article Institution', u'Article Institution 2'] + expected_p = [{u'given_names': u'Mike', 'surname': u'Sullivan'}, + {u'given_names': u'Rubin', u'surname': u'Hurricane Carter'}, + {u'given_names': u'Adilson', u'surname': u'Maguila Rodrigues'}, + {u'given_names': u'Acelino Popó Freitas'}, + {u'surname': u'Zé Marreta'}] + expected = {'person': expected_p, 'institution': expected_i} + citation = Citation(json_citation) + self.assertEqual(citation.analytic_authors_group, expected) + + def test_analytic_authors_group_of_a_chapter(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + json_citation['v10'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v11'] = [ + {u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + expected_i = [u'Article Institution', u'Article Institution 2'] + expected_p = [{u'given_names': u'Mike', u'surname': u'Sullivan'}, + {u'given_names': u'Rubin', u'surname': u'Hurricane Carter'}, + {u'given_names': u'Adilson', u'surname': u'Maguila Rodrigues'}, + {u'given_names': u'Acelino Popó Freitas'}, + {u'surname': u'Zé Marreta'}] + expected = {'person': expected_p, 
'institution': expected_i} + citation = Citation(json_citation) + self.assertEqual(citation.analytic_authors_group, expected) + + def test_analytic_authors_group_of_a_journal(self): + json_citation = {} + json_citation['v30'] = [{u'_': u'It is the journal title'}] + json_citation['v12'] = [{u'_': u'It is the article title'}] + json_citation['v10'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v11'] = [ + {u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + expected_i = [u'Article Institution', u'Article Institution 2'] + expected_p = [{u'given_names': u'Mike', u'surname': u'Sullivan'}, + {u'given_names': u'Rubin', u'surname': u'Hurricane Carter'}, + {u'given_names': u'Adilson', u'surname': u'Maguila Rodrigues'}, + {u'given_names': u'Acelino Popó Freitas'}, + {u'surname': u'Zé Marreta'}] + expected = {'person': expected_p, 'institution': expected_i} + citation = Citation(json_citation) + self.assertEqual(citation.analytic_authors_group, expected) + + def test_without_analytic_authors_group(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + citation = Citation(json_citation) + self.assertEqual(citation.analytic_authors_group, None) + + def test_monographic_authors_group_of_a_book(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v17'] = [ + {u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + expected_i = [u'Article Institution', u'Article Institution 2'] + expected_p = [{u'given_names': u'Mike', 
u'surname': u'Sullivan'}, + {u'given_names': u'Rubin', u'surname': u'Hurricane Carter'}, + {u'given_names': u'Adilson', u'surname': u'Maguila Rodrigues'}, + {u'given_names': u'Acelino Popó Freitas'}, + {u'surname': u'Zé Marreta'}] + expected = {'person': expected_p, 'institution': expected_i} + citation = Citation(json_citation) + self.assertEqual(citation.monographic_authors_group, expected) + + def test_monographic_authors_group_of_a_chapter(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v17'] = [ + {u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + expected_i = [u'Article Institution', u'Article Institution 2'] + expected_p = [{u'given_names': u'Mike', u'surname': u'Sullivan'}, + {u'given_names': u'Rubin', u'surname': u'Hurricane Carter'}, + {u'given_names': u'Adilson', u'surname': u'Maguila Rodrigues'}, + {u'given_names': u'Acelino Popó Freitas'}, + {u'surname': u'Zé Marreta'}] + expected = {'person': expected_p, 'institution': expected_i} + citation = Citation(json_citation) + self.assertEqual(citation.monographic_authors_group, expected) + + def test_monographic_authors_group_of_a_journal(self): + json_citation = {} + json_citation['v30'] = [{u'_': u'It is the journal title'}] + json_citation['v12'] = [{u'_': u'It is the article title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v17'] = [ + {u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + citation = Citation(json_citation) + 
self.assertEqual(citation.monographic_authors_group, None) + + def test_without_monographic_authors_group(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + citation = Citation(json_citation) + self.assertEqual(citation.monographic_authors_group, None) + def test_monographic_person_authors(self): json_citation = {} @@ -4532,6 +4769,87 @@ class CitationTest(unittest.TestCase): self.assertEqual(citation.monographic_authors, None) + def test_pending_deprecation_warning_of_first_author(self): + citation = Citation({}) + with warnings.catch_warnings(record=True) as w: + assert citation.first_author is None + assert issubclass(w[-1].category, PendingDeprecationWarning) + assert len(w) > 1 + assert 'first_author' in str(w[0].message) + + def test_first_author_info__analytic_person(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + ] + json_citation['v17'] = [{u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + json_citation['v10'] = [ + {u's': u'Maguila Rodrigues', u'n': u'Adilson'}, + {u'n': u'Acelino Popó Freitas'}, + {u's': u'Zé Marreta'}] + json_citation['v11'] = [ + {u'_': u'Article Institution 3'}, + {u'_': u'Article Institution 4'}, + ] + citation = Citation(json_citation) + self.assertEqual( + citation.first_author_info, + ('analytic', 'person', {'surname': u'Maguila Rodrigues', + u'given_names': u'Adilson'}) + ) + + def test_first_author_info__monographic_person(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + ] + json_citation['v17'] = [{u'_': u'Article Institution'}, + {u'_': u'Article 
Institution 2'}, + ] + citation = Citation(json_citation) + self.assertEqual( + citation.first_author_info, + ('monographic', 'person', + {u'surname': u'Sullivan', u'given_names': u'Mike'},) + ) + + def test_first_author_info__analytic_institution(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v12'] = [{u'_': u'It is the chapter title'}] + json_citation['v16'] = [{u's': u'Sullivan', u'n': u'Mike'}, + {u's': u'Hurricane Carter', u'n': u'Rubin'}, + ] + json_citation['v17'] = [{u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + json_citation['v11'] = [ + {u'_': u'Article Institution 3'}, + {u'_': u'Article Institution 4'}, + ] + citation = Citation(json_citation) + self.assertEqual( + citation.first_author_info, + ('analytic', 'institution', u'Article Institution 3') + ) + + def test_first_author_info__monographic_institution(self): + json_citation = {} + json_citation['v18'] = [{u'_': u'It is the book title'}] + json_citation['v17'] = [{u'_': u'Article Institution'}, + {u'_': u'Article Institution 2'}, + ] + citation = Citation(json_citation) + self.assertEqual( + citation.first_author_info, + ('monographic', 'institution', u'Article Institution') + ) + def test_first_author_article(self): json_citation = {}
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
1.31
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "nose-cov", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cov-core==1.15.0 coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work legendarium==2.0.6 nose==1.3.7 nose-cov==1.6 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work -e git+https://github.com/scieloorg/xylose.git@6bb32ebe34da88381518e84790963315b54db9c8#egg=xylose
name: xylose channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cov-core==1.15.0 - coverage==7.8.0 - legendarium==2.0.6 - nose==1.3.7 - nose-cov==1.6 prefix: /opt/conda/envs/xylose
[ "tests/test_document.py::CitationTest::test_analytic_authors_group_of_a_book", "tests/test_document.py::CitationTest::test_analytic_authors_group_of_a_chapter", "tests/test_document.py::CitationTest::test_analytic_authors_group_of_a_journal", "tests/test_document.py::CitationTest::test_authors_groups_of_a_chapter", "tests/test_document.py::CitationTest::test_authors_groups_of_a_journal", "tests/test_document.py::CitationTest::test_first_author_info__analytic_institution", "tests/test_document.py::CitationTest::test_first_author_info__analytic_person", "tests/test_document.py::CitationTest::test_first_author_info__monographic_institution", "tests/test_document.py::CitationTest::test_first_author_info__monographic_person", "tests/test_document.py::CitationTest::test_monographic_authors_group_of_a_book", "tests/test_document.py::CitationTest::test_monographic_authors_group_of_a_chapter", "tests/test_document.py::CitationTest::test_monographic_authors_group_of_a_journal", "tests/test_document.py::CitationTest::test_pending_deprecation_warning_of_authors", "tests/test_document.py::CitationTest::test_pending_deprecation_warning_of_first_author", "tests/test_document.py::CitationTest::test_without_analytic_authors_group", "tests/test_document.py::CitationTest::test_without_authors_groups", "tests/test_document.py::CitationTest::test_without_monographic_authors_group" ]
[]
[ "tests/test_document.py::ToolsTests::test_creative_commons_html_1", "tests/test_document.py::ToolsTests::test_creative_commons_html_2", "tests/test_document.py::ToolsTests::test_creative_commons_html_3", "tests/test_document.py::ToolsTests::test_creative_commons_html_4", "tests/test_document.py::ToolsTests::test_creative_commons_html_5", "tests/test_document.py::ToolsTests::test_creative_commons_html_6", "tests/test_document.py::ToolsTests::test_creative_commons_text_1", "tests/test_document.py::ToolsTests::test_creative_commons_text_2", "tests/test_document.py::ToolsTests::test_creative_commons_text_3", "tests/test_document.py::ToolsTests::test_creative_commons_text_4", "tests/test_document.py::ToolsTests::test_creative_commons_text_5", "tests/test_document.py::ToolsTests::test_creative_commons_text_6", "tests/test_document.py::ToolsTests::test_creative_commons_text_7", "tests/test_document.py::ToolsTests::test_creative_commons_text_8", "tests/test_document.py::ToolsTests::test_get_date_wrong_day", "tests/test_document.py::ToolsTests::test_get_date_wrong_day_month", "tests/test_document.py::ToolsTests::test_get_date_wrong_day_month_not_int", "tests/test_document.py::ToolsTests::test_get_date_wrong_day_not_int", "tests/test_document.py::ToolsTests::test_get_date_wrong_month_not_int", "tests/test_document.py::ToolsTests::test_get_date_year", "tests/test_document.py::ToolsTests::test_get_date_year_day", "tests/test_document.py::ToolsTests::test_get_date_year_month", "tests/test_document.py::ToolsTests::test_get_date_year_month_day", "tests/test_document.py::ToolsTests::test_get_date_year_month_day_31", "tests/test_document.py::ToolsTests::test_get_language_iso639_1_defined", "tests/test_document.py::ToolsTests::test_get_language_iso639_1_undefined", "tests/test_document.py::ToolsTests::test_get_language_iso639_2_defined", "tests/test_document.py::ToolsTests::test_get_language_iso639_2_undefined", 
"tests/test_document.py::ToolsTests::test_get_language_without_iso_format", "tests/test_document.py::IssueTests::test_assets_code_month", "tests/test_document.py::IssueTests::test_collection_acronym", "tests/test_document.py::IssueTests::test_creation_date", "tests/test_document.py::IssueTests::test_creation_date_1", "tests/test_document.py::IssueTests::test_creation_date_2", "tests/test_document.py::IssueTests::test_ctrl_vocabulary", "tests/test_document.py::IssueTests::test_ctrl_vocabulary_out_of_choices", "tests/test_document.py::IssueTests::test_is_ahead", "tests/test_document.py::IssueTests::test_is_ahead_1", "tests/test_document.py::IssueTests::test_is_marked_up", "tests/test_document.py::IssueTests::test_is_press_release_false_1", "tests/test_document.py::IssueTests::test_is_press_release_false_2", "tests/test_document.py::IssueTests::test_is_press_release_true", "tests/test_document.py::IssueTests::test_issue", "tests/test_document.py::IssueTests::test_issue_journal_without_journal_metadata", "tests/test_document.py::IssueTests::test_issue_label", "tests/test_document.py::IssueTests::test_issue_url", "tests/test_document.py::IssueTests::test_order", "tests/test_document.py::IssueTests::test_permission_from_journal", "tests/test_document.py::IssueTests::test_permission_id", "tests/test_document.py::IssueTests::test_permission_t0", "tests/test_document.py::IssueTests::test_permission_t1", "tests/test_document.py::IssueTests::test_permission_t2", "tests/test_document.py::IssueTests::test_permission_t3", "tests/test_document.py::IssueTests::test_permission_t4", "tests/test_document.py::IssueTests::test_permission_text", "tests/test_document.py::IssueTests::test_permission_url", "tests/test_document.py::IssueTests::test_permission_without_v540", "tests/test_document.py::IssueTests::test_permission_without_v540_t", "tests/test_document.py::IssueTests::test_processing_date", "tests/test_document.py::IssueTests::test_processing_date_1", 
"tests/test_document.py::IssueTests::test_publication_date", "tests/test_document.py::IssueTests::test_sections", "tests/test_document.py::IssueTests::test_standard", "tests/test_document.py::IssueTests::test_standard_out_of_choices", "tests/test_document.py::IssueTests::test_start_end_month", "tests/test_document.py::IssueTests::test_start_end_month_1", "tests/test_document.py::IssueTests::test_start_end_month_2", "tests/test_document.py::IssueTests::test_start_end_month_3", "tests/test_document.py::IssueTests::test_start_end_month_4", "tests/test_document.py::IssueTests::test_start_end_month_5", "tests/test_document.py::IssueTests::test_start_end_month_6", "tests/test_document.py::IssueTests::test_supplement_number", "tests/test_document.py::IssueTests::test_supplement_volume", "tests/test_document.py::IssueTests::test_title_titles", "tests/test_document.py::IssueTests::test_title_titles_1", "tests/test_document.py::IssueTests::test_title_without_titles", "tests/test_document.py::IssueTests::test_total_documents", "tests/test_document.py::IssueTests::test_total_documents_without_data", "tests/test_document.py::IssueTests::test_type_pressrelease", "tests/test_document.py::IssueTests::test_type_regular", "tests/test_document.py::IssueTests::test_type_supplement_1", "tests/test_document.py::IssueTests::test_type_supplement_2", "tests/test_document.py::IssueTests::test_update_date", "tests/test_document.py::IssueTests::test_update_date_1", "tests/test_document.py::IssueTests::test_update_date_2", "tests/test_document.py::IssueTests::test_update_date_3", "tests/test_document.py::IssueTests::test_volume", "tests/test_document.py::IssueTests::test_without_ctrl_vocabulary", "tests/test_document.py::IssueTests::test_without_ctrl_vocabulary_also_in_journal", "tests/test_document.py::IssueTests::test_without_issue", "tests/test_document.py::IssueTests::test_without_processing_date", "tests/test_document.py::IssueTests::test_without_publication_date", 
"tests/test_document.py::IssueTests::test_without_standard", "tests/test_document.py::IssueTests::test_without_standard_also_in_journal", "tests/test_document.py::IssueTests::test_without_suplement_number", "tests/test_document.py::IssueTests::test_without_supplement_volume", "tests/test_document.py::IssueTests::test_without_volume", "tests/test_document.py::JournalTests::test_abstract_languages", "tests/test_document.py::JournalTests::test_abstract_languages_without_v350", "tests/test_document.py::JournalTests::test_any_issn_priority_electronic", "tests/test_document.py::JournalTests::test_any_issn_priority_electronic_without_electronic", "tests/test_document.py::JournalTests::test_any_issn_priority_print", "tests/test_document.py::JournalTests::test_any_issn_priority_print_without_print", "tests/test_document.py::JournalTests::test_cnn_code", "tests/test_document.py::JournalTests::test_collection_acronym", "tests/test_document.py::JournalTests::test_creation_date", "tests/test_document.py::JournalTests::test_ctrl_vocabulary", "tests/test_document.py::JournalTests::test_ctrl_vocabulary_out_of_choices", "tests/test_document.py::JournalTests::test_current_status", "tests/test_document.py::JournalTests::test_current_status_lots_of_changes_study_case_1", "tests/test_document.py::JournalTests::test_current_status_some_changes", "tests/test_document.py::JournalTests::test_current_without_v51", "tests/test_document.py::JournalTests::test_editor_address", "tests/test_document.py::JournalTests::test_editor_address_without_data", "tests/test_document.py::JournalTests::test_editor_email", "tests/test_document.py::JournalTests::test_editor_email_without_data", "tests/test_document.py::JournalTests::test_first_number", "tests/test_document.py::JournalTests::test_first_number_1", "tests/test_document.py::JournalTests::test_first_volume", "tests/test_document.py::JournalTests::test_first_volume_1", "tests/test_document.py::JournalTests::test_first_year", 
"tests/test_document.py::JournalTests::test_first_year_1", "tests/test_document.py::JournalTests::test_first_year_2", "tests/test_document.py::JournalTests::test_first_year_3", "tests/test_document.py::JournalTests::test_first_year_4", "tests/test_document.py::JournalTests::test_in_ahci", "tests/test_document.py::JournalTests::test_in_scie", "tests/test_document.py::JournalTests::test_in_ssci", "tests/test_document.py::JournalTests::test_institutional_url", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous_false_with_field_regular", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous_false_with_field_undefined", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous_false_without_field", "tests/test_document.py::JournalTests::test_is_publishing_model_continuous_true", "tests/test_document.py::JournalTests::test_is_publishing_model_regular_1", "tests/test_document.py::JournalTests::test_is_publishing_model_regular_2", "tests/test_document.py::JournalTests::test_journal", "tests/test_document.py::JournalTests::test_journal_abbreviated_title", "tests/test_document.py::JournalTests::test_journal_acronym", "tests/test_document.py::JournalTests::test_journal_copyrighter", "tests/test_document.py::JournalTests::test_journal_copyrighter_without_copyright", "tests/test_document.py::JournalTests::test_journal_fulltitle", "tests/test_document.py::JournalTests::test_journal_fulltitle_without_subtitle", "tests/test_document.py::JournalTests::test_journal_fulltitle_without_title", "tests/test_document.py::JournalTests::test_journal_mission", "tests/test_document.py::JournalTests::test_journal_mission_without_language_key", "tests/test_document.py::JournalTests::test_journal_mission_without_mission", "tests/test_document.py::JournalTests::test_journal_mission_without_mission_text", 
"tests/test_document.py::JournalTests::test_journal_mission_without_mission_text_and_language", "tests/test_document.py::JournalTests::test_journal_other_title_without_other_titles", "tests/test_document.py::JournalTests::test_journal_other_titles", "tests/test_document.py::JournalTests::test_journal_publisher_country", "tests/test_document.py::JournalTests::test_journal_publisher_country_not_findable_code", "tests/test_document.py::JournalTests::test_journal_publisher_country_without_country", "tests/test_document.py::JournalTests::test_journal_sponsors", "tests/test_document.py::JournalTests::test_journal_sponsors_with_empty_items", "tests/test_document.py::JournalTests::test_journal_sponsors_without_sponsors", "tests/test_document.py::JournalTests::test_journal_subtitle", "tests/test_document.py::JournalTests::test_journal_title", "tests/test_document.py::JournalTests::test_journal_title_nlm", "tests/test_document.py::JournalTests::test_journal_url", "tests/test_document.py::JournalTests::test_journal_without_subtitle", "tests/test_document.py::JournalTests::test_languages", "tests/test_document.py::JournalTests::test_languages_without_v350", "tests/test_document.py::JournalTests::test_last_cnn_code_1", "tests/test_document.py::JournalTests::test_last_number", "tests/test_document.py::JournalTests::test_last_number_1", "tests/test_document.py::JournalTests::test_last_volume", "tests/test_document.py::JournalTests::test_last_volume_1", "tests/test_document.py::JournalTests::test_last_year", "tests/test_document.py::JournalTests::test_last_year_1", "tests/test_document.py::JournalTests::test_last_year_2", "tests/test_document.py::JournalTests::test_last_year_3", "tests/test_document.py::JournalTests::test_last_year_4", "tests/test_document.py::JournalTests::test_load_issn_with_v435", "tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_ONLINE", "tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_PRINT", 
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_ONLINE", "tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_PRINT", "tests/test_document.py::JournalTests::test_load_issn_with_v935_without_v35", "tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_ONLINE", "tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_PRINT", "tests/test_document.py::JournalTests::test_load_issn_without_v935_without_v35", "tests/test_document.py::JournalTests::test_periodicity", "tests/test_document.py::JournalTests::test_periodicity_in_months", "tests/test_document.py::JournalTests::test_periodicity_in_months_out_of_choices", "tests/test_document.py::JournalTests::test_periodicity_out_of_choices", "tests/test_document.py::JournalTests::test_permission_id", "tests/test_document.py::JournalTests::test_permission_t0", "tests/test_document.py::JournalTests::test_permission_t1", "tests/test_document.py::JournalTests::test_permission_t2", "tests/test_document.py::JournalTests::test_permission_t3", "tests/test_document.py::JournalTests::test_permission_t4", "tests/test_document.py::JournalTests::test_permission_text", "tests/test_document.py::JournalTests::test_permission_url", "tests/test_document.py::JournalTests::test_permission_without_v540", "tests/test_document.py::JournalTests::test_permission_without_v540_t", "tests/test_document.py::JournalTests::test_plevel", "tests/test_document.py::JournalTests::test_plevel_out_of_choices", "tests/test_document.py::JournalTests::test_previous_title", "tests/test_document.py::JournalTests::test_previous_title_without_data", "tests/test_document.py::JournalTests::test_publisher_city", "tests/test_document.py::JournalTests::test_publisher_loc", "tests/test_document.py::JournalTests::test_publisher_name", "tests/test_document.py::JournalTests::test_publisher_state", "tests/test_document.py::JournalTests::test_scielo_issn", 
"tests/test_document.py::JournalTests::test_secs_code", "tests/test_document.py::JournalTests::test_standard", "tests/test_document.py::JournalTests::test_standard_out_of_choices", "tests/test_document.py::JournalTests::test_status", "tests/test_document.py::JournalTests::test_status_lots_of_changes", "tests/test_document.py::JournalTests::test_status_lots_of_changes_study_case_1", "tests/test_document.py::JournalTests::test_status_lots_of_changes_with_reason", "tests/test_document.py::JournalTests::test_status_some_changes", "tests/test_document.py::JournalTests::test_status_without_v51", "tests/test_document.py::JournalTests::test_subject_areas", "tests/test_document.py::JournalTests::test_subject_descriptors", "tests/test_document.py::JournalTests::test_subject_index_coverage", "tests/test_document.py::JournalTests::test_submission_url", "tests/test_document.py::JournalTests::test_update_date", "tests/test_document.py::JournalTests::test_without_ctrl_vocabulary", "tests/test_document.py::JournalTests::test_without_index_coverage", "tests/test_document.py::JournalTests::test_without_institutional_url", "tests/test_document.py::JournalTests::test_without_journal_abbreviated_title", "tests/test_document.py::JournalTests::test_without_journal_acronym", "tests/test_document.py::JournalTests::test_without_journal_title", "tests/test_document.py::JournalTests::test_without_journal_title_nlm", "tests/test_document.py::JournalTests::test_without_journal_url", "tests/test_document.py::JournalTests::test_without_periodicity", "tests/test_document.py::JournalTests::test_without_periodicity_in_months", "tests/test_document.py::JournalTests::test_without_plevel", "tests/test_document.py::JournalTests::test_without_publisher_city", "tests/test_document.py::JournalTests::test_without_publisher_loc", "tests/test_document.py::JournalTests::test_without_publisher_name", "tests/test_document.py::JournalTests::test_without_publisher_state", 
"tests/test_document.py::JournalTests::test_without_scielo_domain", "tests/test_document.py::JournalTests::test_without_scielo_domain_title_v690", "tests/test_document.py::JournalTests::test_without_secs_code", "tests/test_document.py::JournalTests::test_without_standard", "tests/test_document.py::JournalTests::test_without_subject_areas", "tests/test_document.py::JournalTests::test_without_subject_descriptors", "tests/test_document.py::JournalTests::test_without_wos_citation_indexes", "tests/test_document.py::JournalTests::test_without_wos_subject_areas", "tests/test_document.py::JournalTests::test_wos_citation_indexes", "tests/test_document.py::JournalTests::test_wos_subject_areas", "tests/test_document.py::ArticleTests::test_abstracts", "tests/test_document.py::ArticleTests::test_abstracts_iso639_2", "tests/test_document.py::ArticleTests::test_abstracts_without_v83", "tests/test_document.py::ArticleTests::test_acceptance_date", "tests/test_document.py::ArticleTests::test_affiliation_just_with_affiliation_name", "tests/test_document.py::ArticleTests::test_affiliation_with_country_iso_3166", "tests/test_document.py::ArticleTests::test_affiliation_without_affiliation_name", "tests/test_document.py::ArticleTests::test_affiliations", "tests/test_document.py::ArticleTests::test_ahead_publication_date", "tests/test_document.py::ArticleTests::test_article", "tests/test_document.py::ArticleTests::test_author_with_two_affiliations", "tests/test_document.py::ArticleTests::test_author_with_two_role", "tests/test_document.py::ArticleTests::test_author_without_affiliations", "tests/test_document.py::ArticleTests::test_author_without_surname_and_given_names", "tests/test_document.py::ArticleTests::test_authors", "tests/test_document.py::ArticleTests::test_collection_acronym", "tests/test_document.py::ArticleTests::test_collection_acronym_priorizing_collection", "tests/test_document.py::ArticleTests::test_collection_acronym_retrieving_v992", 
"tests/test_document.py::ArticleTests::test_collection_name_brazil", "tests/test_document.py::ArticleTests::test_collection_name_undefined", "tests/test_document.py::ArticleTests::test_corporative_authors", "tests/test_document.py::ArticleTests::test_creation_date", "tests/test_document.py::ArticleTests::test_creation_date_1", "tests/test_document.py::ArticleTests::test_creation_date_2", "tests/test_document.py::ArticleTests::test_data_model_version_html", "tests/test_document.py::ArticleTests::test_data_model_version_html_1", "tests/test_document.py::ArticleTests::test_data_model_version_xml", "tests/test_document.py::ArticleTests::test_document_type", "tests/test_document.py::ArticleTests::test_document_without_issue_metadata", "tests/test_document.py::ArticleTests::test_document_without_journal_metadata", "tests/test_document.py::ArticleTests::test_doi", "tests/test_document.py::ArticleTests::test_doi_clean_1", "tests/test_document.py::ArticleTests::test_doi_clean_2", "tests/test_document.py::ArticleTests::test_doi_v237", "tests/test_document.py::ArticleTests::test_e_location", "tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_1", "tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_2", "tests/test_document.py::ArticleTests::test_end_page_loaded_through_xml", "tests/test_document.py::ArticleTests::test_file_code", "tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_1", "tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_2", "tests/test_document.py::ArticleTests::test_first_author", "tests/test_document.py::ArticleTests::test_first_author_without_author", "tests/test_document.py::ArticleTests::test_fulltexts_field_fulltexts", "tests/test_document.py::ArticleTests::test_fulltexts_without_field_fulltexts", "tests/test_document.py::ArticleTests::test_html_url", "tests/test_document.py::ArticleTests::test_invalid_document_type", "tests/test_document.py::ArticleTests::test_issue_url", 
"tests/test_document.py::ArticleTests::test_journal_abbreviated_title", "tests/test_document.py::ArticleTests::test_keywords", "tests/test_document.py::ArticleTests::test_keywords_iso639_2", "tests/test_document.py::ArticleTests::test_keywords_with_undefined_language", "tests/test_document.py::ArticleTests::test_keywords_without_subfield_k", "tests/test_document.py::ArticleTests::test_keywords_without_subfield_l", "tests/test_document.py::ArticleTests::test_languages_field_fulltexts", "tests/test_document.py::ArticleTests::test_languages_field_v40", "tests/test_document.py::ArticleTests::test_last_page", "tests/test_document.py::ArticleTests::test_mixed_affiliations_1", "tests/test_document.py::ArticleTests::test_normalized_affiliations", "tests/test_document.py::ArticleTests::test_normalized_affiliations_undefined_ISO_3166_CODE", "tests/test_document.py::ArticleTests::test_normalized_affiliations_without_p", "tests/test_document.py::ArticleTests::test_order", "tests/test_document.py::ArticleTests::test_original_abstract_with_just_one_language_defined", "tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined", "tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined_but_different_of_the_article_original_language", "tests/test_document.py::ArticleTests::test_original_abstract_without_language_defined", "tests/test_document.py::ArticleTests::test_original_html_field_body", "tests/test_document.py::ArticleTests::test_original_language_invalid_iso639_2", "tests/test_document.py::ArticleTests::test_original_language_iso639_2", "tests/test_document.py::ArticleTests::test_original_language_original", "tests/test_document.py::ArticleTests::test_original_section_field_v49", "tests/test_document.py::ArticleTests::test_original_title_subfield_t", "tests/test_document.py::ArticleTests::test_original_title_with_just_one_language_defined", "tests/test_document.py::ArticleTests::test_original_title_with_language_defined", 
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined_but_different_of_the_article_original_language", "tests/test_document.py::ArticleTests::test_original_title_without_language_defined", "tests/test_document.py::ArticleTests::test_pdf_url", "tests/test_document.py::ArticleTests::test_processing_date", "tests/test_document.py::ArticleTests::test_processing_date_1", "tests/test_document.py::ArticleTests::test_project_name", "tests/test_document.py::ArticleTests::test_project_sponsors", "tests/test_document.py::ArticleTests::test_publication_contract", "tests/test_document.py::ArticleTests::test_publication_date_with_article_date", "tests/test_document.py::ArticleTests::test_publication_date_without_article_date", "tests/test_document.py::ArticleTests::test_publisher_ahead_id", "tests/test_document.py::ArticleTests::test_publisher_ahead_id_none", "tests/test_document.py::ArticleTests::test_publisher_id", "tests/test_document.py::ArticleTests::test_receive_date", "tests/test_document.py::ArticleTests::test_review_date", "tests/test_document.py::ArticleTests::test_section_code_field_v49", "tests/test_document.py::ArticleTests::test_section_code_nd_field_v49", "tests/test_document.py::ArticleTests::test_section_code_without_field_v49", "tests/test_document.py::ArticleTests::test_section_field_v49", "tests/test_document.py::ArticleTests::test_section_nd_field_v49", "tests/test_document.py::ArticleTests::test_section_without_field_section", "tests/test_document.py::ArticleTests::test_section_without_field_v49", "tests/test_document.py::ArticleTests::test_start_page", "tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_1", "tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_2", "tests/test_document.py::ArticleTests::test_start_page_loaded_through_xml", "tests/test_document.py::ArticleTests::test_start_page_sec", "tests/test_document.py::ArticleTests::test_start_page_sec_0", 
"tests/test_document.py::ArticleTests::test_start_page_sec_0_loaded_through_xml", "tests/test_document.py::ArticleTests::test_start_page_sec_loaded_through_xml", "tests/test_document.py::ArticleTests::test_subject_areas", "tests/test_document.py::ArticleTests::test_thesis_degree", "tests/test_document.py::ArticleTests::test_thesis_organization", "tests/test_document.py::ArticleTests::test_thesis_organization_and_division", "tests/test_document.py::ArticleTests::test_thesis_organization_without_name", "tests/test_document.py::ArticleTests::test_translated_abstracts", "tests/test_document.py::ArticleTests::test_translated_abstracts_without_v83", "tests/test_document.py::ArticleTests::test_translated_abtracts_iso639_2", "tests/test_document.py::ArticleTests::test_translated_htmls_field_body", "tests/test_document.py::ArticleTests::test_translated_section_field_v49", "tests/test_document.py::ArticleTests::test_translated_titles", "tests/test_document.py::ArticleTests::test_translated_titles_iso639_2", "tests/test_document.py::ArticleTests::test_translated_titles_without_v12", "tests/test_document.py::ArticleTests::test_update_date", "tests/test_document.py::ArticleTests::test_update_date_1", "tests/test_document.py::ArticleTests::test_update_date_2", "tests/test_document.py::ArticleTests::test_update_date_3", "tests/test_document.py::ArticleTests::test_whitwout_acceptance_date", "tests/test_document.py::ArticleTests::test_whitwout_ahead_publication_date", "tests/test_document.py::ArticleTests::test_whitwout_receive_date", "tests/test_document.py::ArticleTests::test_whitwout_review_date", "tests/test_document.py::ArticleTests::test_without_affiliations", "tests/test_document.py::ArticleTests::test_without_authors", "tests/test_document.py::ArticleTests::test_without_citations", "tests/test_document.py::ArticleTests::test_without_collection_acronym", "tests/test_document.py::ArticleTests::test_without_corporative_authors", 
"tests/test_document.py::ArticleTests::test_without_document_type", "tests/test_document.py::ArticleTests::test_without_doi", "tests/test_document.py::ArticleTests::test_without_e_location", "tests/test_document.py::ArticleTests::test_without_html_url", "tests/test_document.py::ArticleTests::test_without_issue_url", "tests/test_document.py::ArticleTests::test_without_journal_abbreviated_title", "tests/test_document.py::ArticleTests::test_without_keywords", "tests/test_document.py::ArticleTests::test_without_last_page", "tests/test_document.py::ArticleTests::test_without_normalized_affiliations", "tests/test_document.py::ArticleTests::test_without_order", "tests/test_document.py::ArticleTests::test_without_original_abstract", "tests/test_document.py::ArticleTests::test_without_original_title", "tests/test_document.py::ArticleTests::test_without_pages", "tests/test_document.py::ArticleTests::test_without_pdf_url", "tests/test_document.py::ArticleTests::test_without_processing_date", "tests/test_document.py::ArticleTests::test_without_project_name", "tests/test_document.py::ArticleTests::test_without_project_sponsor", "tests/test_document.py::ArticleTests::test_without_publication_contract", "tests/test_document.py::ArticleTests::test_without_publication_date", "tests/test_document.py::ArticleTests::test_without_publisher_id", "tests/test_document.py::ArticleTests::test_without_scielo_domain", "tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69", "tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69_and_with_title_v690", "tests/test_document.py::ArticleTests::test_without_scielo_domain_title_v690", "tests/test_document.py::ArticleTests::test_without_start_page", "tests/test_document.py::ArticleTests::test_without_subject_areas", "tests/test_document.py::ArticleTests::test_without_thesis_degree", "tests/test_document.py::ArticleTests::test_without_thesis_organization", 
"tests/test_document.py::ArticleTests::test_without_wos_citation_indexes", "tests/test_document.py::ArticleTests::test_without_wos_subject_areas", "tests/test_document.py::ArticleTests::test_wos_citation_indexes", "tests/test_document.py::ArticleTests::test_wos_subject_areas", "tests/test_document.py::CitationTest::test_a_link_access_date", "tests/test_document.py::CitationTest::test_a_link_access_date_absent_v65", "tests/test_document.py::CitationTest::test_analytic_institution_authors_for_a_book_citation", "tests/test_document.py::CitationTest::test_analytic_institution_authors_for_an_article_citation", "tests/test_document.py::CitationTest::test_analytic_institution_for_a_article_citation", "tests/test_document.py::CitationTest::test_analytic_institution_for_a_book_citation", "tests/test_document.py::CitationTest::test_analytic_person_authors", "tests/test_document.py::CitationTest::test_article_title", "tests/test_document.py::CitationTest::test_article_without_title", "tests/test_document.py::CitationTest::test_authors_article", "tests/test_document.py::CitationTest::test_authors_book", "tests/test_document.py::CitationTest::test_authors_link", "tests/test_document.py::CitationTest::test_authors_thesis", "tests/test_document.py::CitationTest::test_book_chapter_title", "tests/test_document.py::CitationTest::test_book_edition", "tests/test_document.py::CitationTest::test_book_volume", "tests/test_document.py::CitationTest::test_book_without_chapter_title", "tests/test_document.py::CitationTest::test_citation_sample_congress", "tests/test_document.py::CitationTest::test_citation_sample_link", "tests/test_document.py::CitationTest::test_citation_sample_link_without_comment", "tests/test_document.py::CitationTest::test_conference_edition", "tests/test_document.py::CitationTest::test_conference_name", "tests/test_document.py::CitationTest::test_conference_sponsor", "tests/test_document.py::CitationTest::test_conference_without_name", 
"tests/test_document.py::CitationTest::test_conference_without_sponsor", "tests/test_document.py::CitationTest::test_date", "tests/test_document.py::CitationTest::test_doi", "tests/test_document.py::CitationTest::test_editor", "tests/test_document.py::CitationTest::test_elocation_14", "tests/test_document.py::CitationTest::test_elocation_514", "tests/test_document.py::CitationTest::test_end_page_14", "tests/test_document.py::CitationTest::test_end_page_514", "tests/test_document.py::CitationTest::test_end_page_withdout_data", "tests/test_document.py::CitationTest::test_first_author_article", "tests/test_document.py::CitationTest::test_first_author_book", "tests/test_document.py::CitationTest::test_first_author_link", "tests/test_document.py::CitationTest::test_first_author_thesis", "tests/test_document.py::CitationTest::test_first_author_without_monographic_authors", "tests/test_document.py::CitationTest::test_first_author_without_monographic_authors_but_not_a_book_citation", "tests/test_document.py::CitationTest::test_index_number", "tests/test_document.py::CitationTest::test_institutions_all_fields", "tests/test_document.py::CitationTest::test_institutions_v11", "tests/test_document.py::CitationTest::test_institutions_v17", "tests/test_document.py::CitationTest::test_institutions_v29", "tests/test_document.py::CitationTest::test_institutions_v50", "tests/test_document.py::CitationTest::test_institutions_v58", "tests/test_document.py::CitationTest::test_invalid_edition", "tests/test_document.py::CitationTest::test_isbn", "tests/test_document.py::CitationTest::test_isbn_but_not_a_book", "tests/test_document.py::CitationTest::test_issn", "tests/test_document.py::CitationTest::test_issn_but_not_an_article", "tests/test_document.py::CitationTest::test_issue_part", "tests/test_document.py::CitationTest::test_issue_title", "tests/test_document.py::CitationTest::test_journal_issue", "tests/test_document.py::CitationTest::test_journal_volume", 
"tests/test_document.py::CitationTest::test_link", "tests/test_document.py::CitationTest::test_link_title", "tests/test_document.py::CitationTest::test_link_without_title", "tests/test_document.py::CitationTest::test_mixed_citation_1", "tests/test_document.py::CitationTest::test_mixed_citation_10", "tests/test_document.py::CitationTest::test_mixed_citation_11", "tests/test_document.py::CitationTest::test_mixed_citation_12", "tests/test_document.py::CitationTest::test_mixed_citation_13", "tests/test_document.py::CitationTest::test_mixed_citation_14", "tests/test_document.py::CitationTest::test_mixed_citation_15", "tests/test_document.py::CitationTest::test_mixed_citation_16", "tests/test_document.py::CitationTest::test_mixed_citation_17", "tests/test_document.py::CitationTest::test_mixed_citation_18", "tests/test_document.py::CitationTest::test_mixed_citation_19", "tests/test_document.py::CitationTest::test_mixed_citation_2", "tests/test_document.py::CitationTest::test_mixed_citation_3", "tests/test_document.py::CitationTest::test_mixed_citation_4", "tests/test_document.py::CitationTest::test_mixed_citation_5", "tests/test_document.py::CitationTest::test_mixed_citation_6", "tests/test_document.py::CitationTest::test_mixed_citation_7", "tests/test_document.py::CitationTest::test_mixed_citation_8", "tests/test_document.py::CitationTest::test_mixed_citation_9", "tests/test_document.py::CitationTest::test_mixed_citation_without_data", "tests/test_document.py::CitationTest::test_monographic_authors", "tests/test_document.py::CitationTest::test_monographic_first_author", "tests/test_document.py::CitationTest::test_monographic_institution_authors_for_a_book_citation", "tests/test_document.py::CitationTest::test_monographic_institution_authors_for_an_article_citation", "tests/test_document.py::CitationTest::test_monographic_person_authors", "tests/test_document.py::CitationTest::test_pages_14", "tests/test_document.py::CitationTest::test_pages_514", 
"tests/test_document.py::CitationTest::test_pages_withdout_data", "tests/test_document.py::CitationTest::test_pending_deprecation_warning_of_analytic_authors", "tests/test_document.py::CitationTest::test_pending_deprecation_warning_of_analytic_institution", "tests/test_document.py::CitationTest::test_pending_deprecation_warning_of_monographic_authors", "tests/test_document.py::CitationTest::test_pending_deprecation_warning_of_monographic_institution", "tests/test_document.py::CitationTest::test_publication_type_article", "tests/test_document.py::CitationTest::test_publication_type_book", "tests/test_document.py::CitationTest::test_publication_type_book_chapter", "tests/test_document.py::CitationTest::test_publication_type_conference", "tests/test_document.py::CitationTest::test_publication_type_link", "tests/test_document.py::CitationTest::test_publication_type_thesis", "tests/test_document.py::CitationTest::test_publication_type_undefined", "tests/test_document.py::CitationTest::test_publisher", "tests/test_document.py::CitationTest::test_publisher_address", "tests/test_document.py::CitationTest::test_publisher_address_without_e", "tests/test_document.py::CitationTest::test_series_book", "tests/test_document.py::CitationTest::test_series_but_neither_journal_book_or_conference_citation", "tests/test_document.py::CitationTest::test_series_conference", "tests/test_document.py::CitationTest::test_series_journal", "tests/test_document.py::CitationTest::test_source_book_title", "tests/test_document.py::CitationTest::test_source_journal", "tests/test_document.py::CitationTest::test_source_journal_without_journal_title", "tests/test_document.py::CitationTest::test_sponsor", "tests/test_document.py::CitationTest::test_start_page_14", "tests/test_document.py::CitationTest::test_start_page_514", "tests/test_document.py::CitationTest::test_start_page_withdout_data", "tests/test_document.py::CitationTest::test_thesis_institution", 
"tests/test_document.py::CitationTest::test_thesis_title", "tests/test_document.py::CitationTest::test_thesis_without_title", "tests/test_document.py::CitationTest::test_title_when_article_citation", "tests/test_document.py::CitationTest::test_title_when_conference_citation", "tests/test_document.py::CitationTest::test_title_when_link_citation", "tests/test_document.py::CitationTest::test_title_when_thesis_citation", "tests/test_document.py::CitationTest::test_with_volume_but_not_a_journal_article_neither_a_book", "tests/test_document.py::CitationTest::test_without_analytic_institution", "tests/test_document.py::CitationTest::test_without_analytic_person_authors", "tests/test_document.py::CitationTest::test_without_analytic_person_authors_but_not_a_book_citation", "tests/test_document.py::CitationTest::test_without_authors", "tests/test_document.py::CitationTest::test_without_date", "tests/test_document.py::CitationTest::test_without_doi", "tests/test_document.py::CitationTest::test_without_edition", "tests/test_document.py::CitationTest::test_without_editor", "tests/test_document.py::CitationTest::test_without_first_author", "tests/test_document.py::CitationTest::test_without_index_number", "tests/test_document.py::CitationTest::test_without_institutions", "tests/test_document.py::CitationTest::test_without_issue", "tests/test_document.py::CitationTest::test_without_issue_part", "tests/test_document.py::CitationTest::test_without_issue_title", "tests/test_document.py::CitationTest::test_without_link", "tests/test_document.py::CitationTest::test_without_monographic_authors", "tests/test_document.py::CitationTest::test_without_monographic_authors_but_not_a_book_citation", "tests/test_document.py::CitationTest::test_without_monographic_person_authors", "tests/test_document.py::CitationTest::test_without_monographic_person_authors_but_not_a_book_citation", "tests/test_document.py::CitationTest::test_without_publisher", 
"tests/test_document.py::CitationTest::test_without_publisher_address", "tests/test_document.py::CitationTest::test_without_series", "tests/test_document.py::CitationTest::test_without_sponsor", "tests/test_document.py::CitationTest::test_without_thesis_institution", "tests/test_document.py::CitationTest::test_without_volume" ]
[]
BSD 2-Clause "Simplified" License
2,867
[ "xylose/scielodocument.py" ]
[ "xylose/scielodocument.py" ]
rm-hull__luma.led_matrix-158
8b9323d8272d4789bbcef9b031573fa582cfcc23
2018-08-03 21:31:20
8b9323d8272d4789bbcef9b031573fa582cfcc23
rm-hull: py27 build fails because of different unicode/utf-8 handling .. any ideas @thijstriemstra ? thijstriemstra: oh the (co-)author should be added to contributors? thijstriemstra: > py27 build fails because of different unicode/utf-8 handling let me check. thijstriemstra: Took another look but can't figure it out, since I'm not sure what 'dot_muncher' does. Perhaps you can comment it line by line in this PR and it'll become more clear. rm-hull: ### Notes Seems like python2 doesn't properly iterate over unicode characters (but python 3 does), and this is the root cause of the problem. * https://stackoverflow.com/questions/46711888/how-to-properly-iterate-over-unicode-characters-in-python ```python Python 2.7.15 (default, Jun 17 2018, 12:46:58) [GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.2)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> x = "29.12°C" >>> len(x) 8 >>> list(x) ['2', '9', '.', '1', '2', '\xc2', '\xb0', 'C'] >>> y = u"29.12°C" >>> len(y) 7 >>> list(y) [u'2', u'9', u'.', u'1', u'2', u'\xb0', u'C'] ``` vs. ```python3 Python 3.7.0 (default, Jun 29 2018, 20:13:13) [Clang 9.1.0 (clang-902.0.39.2)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> x = "29.12°C" >>> len(x) 7 >>> list(x) ['2', '9', '.', '1', '2', '°', 'C'] ```
diff --git a/.gitignore b/.gitignore index 04ce1c3..92614be 100644 --- a/.gitignore +++ b/.gitignore @@ -42,6 +42,7 @@ htmlcov/ .cache nosetests.xml coverage.xml +.pytest_cache/ # Translations *.mo diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 3f474bb..f86c1c5 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,3 +22,4 @@ Contributors * Qinkang Huang (@pokebox) * Shawn Woodford (@swoodford) * Phil Howard (@gadgetoid) +* Petr Kracík (@petrkr) diff --git a/luma/led_matrix/segment_mapper.py b/luma/led_matrix/segment_mapper.py index 91c3447..f8e22cf 100644 --- a/luma/led_matrix/segment_mapper.py +++ b/luma/led_matrix/segment_mapper.py @@ -6,6 +6,8 @@ _DIGITS = { ' ': 0x00, '-': 0x01, '_': 0x08, + u'°': 0x63, + '\xb0': 0x63, '\'': 0x02, '0': 0x7e, '1': 0x30, @@ -86,6 +88,9 @@ def regular(text, notfound="_"): def dot_muncher(text, notfound="_"): + if not text: + return + undefined = _DIGITS[notfound] iterator = iter(text) last = _DIGITS.get(next(iterator), undefined) diff --git a/setup.py b/setup.py index 60a671d..84fb47a 100644 --- a/setup.py +++ b/setup.py @@ -50,7 +50,7 @@ setup( url="https://github.com/rm-hull/luma.led_matrix", download_url="https://github.com/rm-hull/luma.led_matrix/tarball/" + version, packages=["luma", "luma.led_matrix"], - install_requires=["luma.core>=1.2.1"], + install_requires=["luma.core>=1.8.1"], setup_requires=pytest_runner, tests_require=test_deps, extras_require={
Add degree symbol to charmap Add degree symbol to charmap, so we can use this library to show temperature. ```diff diff --git a/luma/led_matrix/segment_mapper.py b/luma/led_matrix/segment_mapper.py index 91c3447..d6a8e5f 100644 --- a/luma/led_matrix/segment_mapper.py +++ b/luma/led_matrix/segment_mapper.py @@ -6,6 +6,7 @@ _DIGITS = { ' ': 0x00, '-': 0x01, '_': 0x08, + '°': 0x63, '\'': 0x02, '0': 0x7e, '1': 0x30, ``` ![image](https://user-images.githubusercontent.com/6428351/43455412-004f493c-94c0-11e8-9d43-a35c2d056474.png)
rm-hull/luma.led_matrix
diff --git a/tests/test_segment_mapper.py b/tests/test_segment_mapper.py index c32cf19..7ef4e20 100644 --- a/tests/test_segment_mapper.py +++ b/tests/test_segment_mapper.py @@ -62,3 +62,21 @@ def test_regular_empty_buf(): buf = mutable_string("") results = regular(buf) assert list(results) == [] + + +def test_degrees_string(): + buf = mutable_string("29.12\xb0C") + results = dot_muncher(buf) + assert list(results) == [0x6d, 0x7b | 0x80, 0x30, 0x6d, 0x63, 0x4e] + + +def test_degrees_unicode(): + buf = mutable_string(u"29.12°C") + results = dot_muncher(buf) + assert list(results) == [0x6d, 0x7b | 0x80, 0x30, 0x6d, 0x63, 0x4e] + + +def test_degrees_utf8(): + buf = mutable_string(u"29.12\xb0C") + results = dot_muncher(buf) + assert list(results) == [0x6d, 0x7b | 0x80, 0x30, 0x6d, 0x63, 0x4e]
{ "commit_name": "merge_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 4 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y build-essential python3-dev libfreetype6-dev libjpeg-dev" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cbor2==5.6.5 coverage==7.8.0 exceptiongroup==1.2.2 iniconfig==2.1.0 luma.core==2.4.2 -e git+https://github.com/rm-hull/luma.led_matrix.git@8b9323d8272d4789bbcef9b031573fa582cfcc23#egg=luma.led_matrix packaging==24.2 pillow==11.1.0 pluggy==1.5.0 pyftdi==0.56.0 pyserial==3.5 pytest==8.3.5 pytest-cov==6.0.0 pyusb==1.3.1 RPi.GPIO==0.7.1 smbus2==0.5.0 spidev==3.6 tomli==2.2.1
name: luma.led_matrix channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cbor2==5.6.5 - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - luma-core==2.4.2 - packaging==24.2 - pillow==11.1.0 - pluggy==1.5.0 - pyftdi==0.56.0 - pyserial==3.5 - pytest==8.3.5 - pytest-cov==6.0.0 - pyusb==1.3.1 - rpi-gpio==0.7.1 - smbus2==0.5.0 - spidev==3.6 - tomli==2.2.1 prefix: /opt/conda/envs/luma.led_matrix
[ "tests/test_segment_mapper.py::test_dot_muncher_empty_buf", "tests/test_segment_mapper.py::test_degrees_string", "tests/test_segment_mapper.py::test_degrees_unicode", "tests/test_segment_mapper.py::test_degrees_utf8" ]
[]
[ "tests/test_segment_mapper.py::test_dot_muncher_without_dots", "tests/test_segment_mapper.py::test_dot_muncher_with_dot", "tests/test_segment_mapper.py::test_dot_muncher_with_dot_at_end", "tests/test_segment_mapper.py::test_dot_muncher_with_multiple_dot", "tests/test_segment_mapper.py::test_regular_without_dots", "tests/test_segment_mapper.py::test_regular_with_dot", "tests/test_segment_mapper.py::test_regular_with_multiple_dot", "tests/test_segment_mapper.py::test_regular_empty_buf" ]
[]
MIT License
2,868
[ "setup.py", ".gitignore", "luma/led_matrix/segment_mapper.py", "CONTRIBUTING.rst" ]
[ "setup.py", ".gitignore", "luma/led_matrix/segment_mapper.py", "CONTRIBUTING.rst" ]
sigmavirus24__github3.py-880
37b3cf99960523256d42c2ba512c11c74731123b
2018-08-03 22:05:21
b8e7aa8eb221cd1eec7a8bc002b75de8098dc77a
jacquerie: IMO this is ready for review! Perhaps @omgjlk would like to take a look, given https://github.com/sigmavirus24/github3.py/issues/748#issuecomment-352588562 ?
diff --git a/src/github3/github.py b/src/github3/github.py index ce22a5fb..3cb9f75c 100644 --- a/src/github3/github.py +++ b/src/github3/github.py @@ -1669,6 +1669,87 @@ class GitHub(models.GitHubCore): number, url, search.CodeSearchResult, self, params, etag, headers ) + def search_commits(self, query, sort=None, order=None, per_page=None, + text_match=False, number=-1, etag=None): + """Find commits via the commits search API. + + The query can contain any combination of the following supported + qualifiers: + + - ``author`` Matches commits authored by the given username. + Example: ``author:defunkt``. + - ``committer`` Matches commits committed by the given username. + Example: ``committer:defunkt``. + - ``author-name`` Matches commits authored by a user with the given + name. Example: ``author-name:wanstrath``. + - ``committer-name`` Matches commits committed by a user with the given + name. Example: ``committer-name:wanstrath``. + - ``author-email`` Matches commits authored by a user with the given + email. Example: ``author-email:[email protected]``. + - ``committer-email`` Matches commits committed by a user with the + given email. Example: ``committer-email:[email protected]``. + - ``author-date`` Matches commits authored within the specified date + range. Example: ``author-date:<2016-01-01``. + - ``committer-date`` Matches commits committed within the specified + date range. Example: ``committer-date:>2016-01-01``. + - ``merge`` Matches merge commits when set to to ``true``, excludes + them when set to ``false``. + - ``hash`` Matches commits with the specified hash. Example: + ``hash:124a9a0ee1d8f1e15e833aff432fbb3b02632105``. + - ``parent`` Matches commits whose parent has the specified hash. + Example: ``parent:124a9a0ee1d8f1e15e833aff432fbb3b02632105``. + - ``tree`` Matches commits with the specified tree hash. Example: + ``tree:99ca967``. + - ``is`` Matches public repositories when set to ``public``, private + repositories when set to ``private``. 
+ - ``user`` or ``org`` or ``repo`` Limits the search to a specific user, + organization, or repository. + + For more information about these qualifiers, see: https://git.io/vb7XQ + + :param str query: + (required), a valid query as described above, e.g., + ``css repo:octocat/Spoon-Knife`` + :param str sort: + (optional), how the results should be sorted; + options: ``author-date``, ``committer-date``; + default: best match + :param str order: + (optional), the direction of the sorted results, + options: ``asc``, ``desc``; default: ``desc`` + :param int per_page: + (optional) + :param int number: + (optional), number of commits to return. + Default: -1, returns all available commits + :param str etag: + (optional), previous ETag header value + :return: + generator of commit search results + :rtype: + :class:`~github3.search.commits.CommitSearchResult` + """ + params = {'q': query} + headers = {'Accept': 'application/vnd.github.cloak-preview'} + + if sort in ('author-date', 'committer-date'): + params['sort'] = sort + + if sort and order in ('asc', 'desc'): + params['order'] = order + + if text_match: + headers['Accept'] = ', '.join([ + headers['Accept'], + 'application/vnd.github.v3.full.text-match+json' + ]) + + url = self._build_url('search', 'commits') + return structs.SearchIterator( + number, url, search.CommitSearchResult, + self, params, etag, headers + ) + def search_issues(self, query, sort=None, order=None, per_page=None, text_match=False, number=-1, etag=None): """Find issues by state and keyword. 
diff --git a/src/github3/search/__init__.py b/src/github3/search/__init__.py index 97f776b8..37ea15fa 100644 --- a/src/github3/search/__init__.py +++ b/src/github3/search/__init__.py @@ -1,8 +1,14 @@ from .code import CodeSearchResult +from .commit import CommitSearchResult from .issue import IssueSearchResult from .repository import RepositorySearchResult from .user import UserSearchResult -__all__ = [CodeSearchResult, IssueSearchResult, RepositorySearchResult, - UserSearchResult] +__all__ = ( + 'CodeSearchResult', + 'CommitSearchResult', + 'IssueSearchResult', + 'RepositorySearchResult', + 'UserSearchResult', +) diff --git a/src/github3/search/commit.py b/src/github3/search/commit.py new file mode 100644 index 00000000..7f694146 --- /dev/null +++ b/src/github3/search/commit.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +"""Commit search results implementation.""" +from __future__ import unicode_literals + +from .. import git +from .. import models +from .. import repos +from .. import users + + +class CommitSearchResult(models.GitHubCore): + """A representation of a commit search result from the API. + + This object has the following attributes: + + .. attribute:: author + + A :class:`~github3.users.ShortUser` representing the user who + authored the found commit. + + .. attribute:: comments_url + + The URL to retrieve the comments on the found commit from the API. + + .. attribute:: commit + + A :class:`~github3.git.ShortCommit` representing the found commit. + + .. attribute:: committer + + A :class:`~github3.users.ShortUser` representing the user who + committed the found commit. + + .. attribute:: html_url + + The URL to view the found commit in a browser. + + .. attribute:: repository + + A :class:`~github3.repos.repo.ShortRepository` representing the + repository in which the commit was found. + + .. attribute:: score + + The confidence score assigned to the result. + + .. attribute:: sha + + The SHA1 of the found commit. + + .. 
attribute:: text_matches + + A list of the text matches in the commit that generated this result. + + .. note:: + + To receive these, you must pass ``text_match=True`` to + :meth:`~github3.github.GitHub.search_commit`. + """ + + def _update_attributes(self, data): + self._api = data['url'] + self.author = users.ShortUser(data['author'], self) + self.comments_url = data['comments_url'] + self.commit = git.ShortCommit(data['commit'], self) + self.committer = users.ShortUser(data['committer'], self) + self.html_url = data['html_url'] + self.repository = repos.ShortRepository(data['repository'], self) + self.score = data['score'] + self.sha = data['sha'] + self.text_matches = data.get('text_matches', []) + + def _repr(self): + return '<CommitSearchResult [{0}]>'.format(self.sha[:7])
Support for searching commits Any plans to add support for searching commits? ``` https://developer.github.com/v3/search/#search-commits ```
sigmavirus24/github3.py
diff --git a/tests/cassettes/GitHub_search_commits.json b/tests/cassettes/GitHub_search_commits.json new file mode 100644 index 00000000..39082376 --- /dev/null +++ b/tests/cassettes/GitHub_search_commits.json @@ -0,0 +1,1 @@ +{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.cloak-preview"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"]}, "method": "GET", "uri": "https://api.github.com/search/commits?q=css+repo%3Aoctocat%2FSpoon-Knife&per_page=100"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+1ZXW/iRhT9K4jXJtjGhthI0XbVpKttRaKk7HaTaoXG4zEesD3WzDgUrPz33hkbA1EIYLbtS15CMD5n7sfM9bnXRVsyieIxZnkq2wPrrE1TzJIsJpKMORF5LEV7EKJYEPhJkgS+/VW0cx63B+1IykwMDANltDOhMsr9DmANTjImDIYlw0gaf2SMpee/pzQkBvyaUCkM33cwdgPb7xLL8W0U9oKw73nuRT8InDC0EQ5M078I22dtESFY6QhAygIypgGAhlcP/buuJ/0/Y3M4Gs6HV9e94TSjD9P76cMymg27nxePo2vzYXoTPU6/zh6nD73b0cS+Se5nN58ek2H31+nj1cf5wzRIwJJIJvF42/ENp3e7e4y3KkAkleLFOv9agHVG1ILgX5mc9qBxdiEaTTKMchkxrtYNkCSQt65pOedm99x0RpYzsN2B3T833YFpgpEpStQto4i0bssNBhdJgqjaj1UOfk7ZPCKcqM3Yfl45Jslra1jdkdUbWO6g1ztpjYQIgSbKtF84ATdaQi5iIjpYiBZKg1aeKeeC1v31x6vhNdgsOYG7T4q1ohAG6tse8fqhZ9ruhXcR+qhPTKfvhZ6DHN/E2Ol5Ieq79Vk6GFCFDrZHXR4suLbOV8wmNF3HHVZQ567n2l0bCsnmQbzrf/12E+PpF2e4/Lwc3l1ews3oCUnEX251fVHYVUHJBeGYpRKM0LUlN0r6D0+XDlBMeEWiDzxceLMwKbK6MMHNB51ouC9kcczmgH1p63bd26I3alBNQNPJ8QQAKgwmYT+PFf+zcpqKfRVi2xQNKAz1AZVRUQiIMSfBMeZUEDBmnoIdhS7zmiv3BeY0k5SlR0VoCwhEjE9QSpfoaCIAqgKmnzvHuKQBACRP+2vudkRLRGFknD4hvFBh4AQT+gQxPZ7tBRTI5CJTteQLZFxFGB68YxQk6rDpR/HLovZ+ELcEyPtBfD+I/9lBzBDXAuoHyGJkm9jyiB1a9kW/h2zfcd2ud+G6Praw7/oO8XqYEPXgO+jR9Yr2No5Yo5TeBwOev1clmErGF0rbqIeyZZum5XW35cD1/Db+LcafvCX6dv+E09nfw+XH+XA0W4BrlcTb6BvgYpjH8bj65RWpDXfox5Ja9b0YvhfDd1Xy/6gSrYdUE1dNDI4uU3CQA1LLSd3pUdFSQq0FnyHjrYAkoDMl1zKxleUcNBwRLZbGi44qFIzP6uXf7AZ2jikqlj1ScicclD+fKVk5I4vGHApbGPC3EuwYWhDkM/Ca7WtDdhu2RVJA
r77mVApWEpQ0NliDgSRibNaYRIOBhAqRk4OE9G5nNYcwVko9zRO/bJ8O0ee7aUs02IiEoJMUOvC3O47dTDVBYaw6O5+jFEfNKVf4wij/01lFk8YmKixQ+DHzG3OoqZAmKAwQFGX/KsenWKUYFX6LkJPwJBMVvibUk5WmedXmKYKaDlpnCSluzLjCG0UVwRilkxzGTY0ZawLIrmrsJ2i5d7yxeyevGYBOTWs49fPTCtWaQ1lYThjg/DZ2eINiTahHFk3TvDm90G7r0XZTtgq+taVPpNwYx9a0P2K+rLqdwljX07JYV8xN/a+q9cq+Tf5q/HdCaPX4UBjFTxmSkapAsIxq25oaW8GNwkcwi+l0OkVEkJ6rJYSfcCpLNNAgjiOYJTW1r1jhQYkkSOpZXajMC6BJihkKGseyJgCyMmVNbSzRm3nOoMVrbJgGb7IlFMbvkqXNa+SaYZM3ZZKGFF5s7Z927i6YWyTFBwHv3MgZiuMz2JWSYgr7FOa+KmMg+kjzqJRoMB/empQzypjAlm0cZU5KfGGUY+SAZDFbnPTGaoNCvasRmHFoH+yO0+1Z8B7h+fvzP+6CgBWjHAAA", "encoding": "utf-8"}, "headers": {"Status": ["200 OK"], "X-RateLimit-Remaining": ["9"], "X-GitHub-Media-Type": ["github.cloak-preview"], "X-Runtime-rack": ["0.066329"], "Content-Security-Policy": ["default-src 'none'"], "X-Content-Type-Options": ["nosniff"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "X-GitHub-Request-Id": ["BFE0:5172:2C4F2:5A0E2:5B64C901"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "X-XSS-Protection": ["1; mode=block"], "Server": ["GitHub.com"], "X-RateLimit-Limit": ["10"], "Cache-Control": ["no-cache"], "Date": ["Fri, 03 Aug 2018 21:28:33 GMT"], "Access-Control-Allow-Origin": ["*"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Frame-Options": ["deny"], "Content-Encoding": ["gzip"], "X-RateLimit-Reset": ["1533331773"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/search/commits?q=css+repo%3Aoctocat%2FSpoon-Knife&per_page=100"}, "recorded_at": "2018-08-03T21:28:33"}], "recorded_with": "betamax/0.8.1"} \ No newline at end of file diff --git a/tests/cassettes/GitHub_search_commits_with_text_match.json 
b/tests/cassettes/GitHub_search_commits_with_text_match.json new file mode 100644 index 00000000..1ec94cb5 --- /dev/null +++ b/tests/cassettes/GitHub_search_commits_with_text_match.json @@ -0,0 +1,1 @@ +{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Encoding": ["gzip, deflate"], "Accept": ["application/vnd.github.cloak-preview, application/vnd.github.v3.full.text-match+json"], "User-Agent": ["github3.py/1.1.0"], "Accept-Charset": ["utf-8"], "Connection": ["keep-alive"], "Content-Type": ["application/json"]}, "method": "GET", "uri": "https://api.github.com/search/commits?q=css+repo%3Aoctocat%2FSpoon-Knife&per_page=100"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+1ZbW/bNhD+K4a/LrEkS7YlA0VXNFnRDUrRzO2aDIVBUZRFWxIFkoprC/nvO1KybAd1Y8vd9iUIECeynof3wjveHcuuZBIlU8yKTHbH1kWXZpileUIkmXIiikSK7jhCiSDwlSQp/Pd32S140h13YylzMTYMlNPejMq4CHqANTjJmTAYlgwjafyZM5Zd/pHRiBjwbUqlMILAwdgN7aBPLCewUTQIo6HnuaNhGDpRZCMcmmYwiroXXREjWOkEQMZCMqUhgPyru+HHvieDvxLTn/hL/+p64M9zeje/nd+t44Xff7+6n1ybd/Ob+H7+eXE/vxt8mMzsm/R2cfPuPvX7v83vr94s7+ZhCpLEMk2m+4rvKH1Y3VO0VQYimRRP1vnXDKw9ohYE/SrndMetvQvWaONhVMiYcbVuiCQBv/VNy7k0+5emM7Gcse2O7eGl6Y5NE4TMUKpemcSk86HaYPCQpIiq/Vj74NeMLWPCidqM3ceNYpJ8bw2rP7EGY8sdDwZnrZESIdBMifaWE1CjI+QqIaKHheigLOwUuVIu7Nxev7nyr0FmyQm8fZatFYUw0ND2iDeMPNN2R94oCtCQmM7QizwHOYGJsTPwIjR0m1g6GlCbDrZHkx4seLb1V8JmNNvaHVZQcTdw7b4NiWQ3ED8OP3+5SfD8k+Ov36/9j69ewcvoAUnEn251/VDYdUIpBOGYZRKE0LmlMCr61w+vHKCY8ZpEBzw8+GFiUmRNYoKXj4poeC9iScKWgH0q637e26M3GlBDQLPZ6QQAKg0mYT9PFf+jUpqK5zLEvigaUBrqAzKjohBgY07CU8SpISDMMgM5Sp3mNVcRCMxpLinLTrLQHhCIGJ+hjK7RyUQAVAlMnzunqKQBACQPz+fcfYtWiNLIOX1AeKXMwAkm9AFsejrbEyiQyVWucskn8LiyMBy8UxSmKtj0Ufw0qb0E4l4B8hKIL4H4nwVijrguoH5CWYxsE1sesSPLHg0HyA4c1+17I9cNsIUDN3CIN8CEqIPvqKPrO7W3ccIaVel9NODxa52CqWR8pWobdShbtmlaXn+/HLhefkh+T/A7b42+3D7gbPHNX79Z+pPFClSrS7ydvgEeRkWSTOtvvlNqwxv6WFKrviTDl2T4UpX8P1WJrodUE1dPDE5OUxDIIWnKSd3pUdFRhVoHPiPGOyFJoc6UXJeJnbzgUMMR0WFZsuqpRMH4oln+h93AwTFFzfJMKXkQDpU/X6iyckFWrTkUtjTg
d12wY2hBUMBAa/ZcG3JYsD2SEnr1LaeqYCVBaWuBNRhIYsYWrUk0GEioEAU5qpA+rKzmEMamUs+KNKjap2Pq88O0FRpkRELQWQYd+I87jsNMDUFpbDq7gKMMx+0pN/jSqP7SXkWz1iIqLFAECQtac6ipkCYoDSgoqv5VTs+RSjEq/B4hJ9FZIip8Q6gnK239qsVTBA0dtM4SXNyacYM3ytqCCcpmBYybWjM2BOBd1djP0PrZ8cbhnbxlADo1reE0KM5LVFsOJWE1YYD4ba3wDsWWUI8s2rp5d3qh1daj7bZsNXxvS59JuTOObWh/xnxZdTulsc2nVbKumdvqX2frjXy7/PX47wzT6vGhMMpfciRjlYFgGdW2tRW2hhtlgGAW0+v1ypggPVdLCT8jKis00CCOY5gltZWv3OChEkmR1LO6SIkXQpOUMBS2tmVDAGSVy9rKWKF3/ZxDi9daMA3eZUspjN8ly9rnyC3DLm/GJI0oXGw9P+08nDD3SMrXAu7cyAVKkgvYlZJiCvsU5r7KY1D0kfZWqdAgPtyaVDPKhMCWbW1lTip8aVRj5JDkCVuddWO1Q6HuagRmHNoHu+f0hwO3b6uq9Jucwi5WdZEesrBgTrBsu+/qpHrKpVy9YD2TfasTvV/f91x0c85ywiUMPCD6q0sg6EI4mimzHH8ftKuhUhmQcHekwiwLKdaqW86FNfr6WP38AyuRIHW5HQAA", "encoding": "utf-8"}, "headers": {"Status": ["200 OK"], "X-RateLimit-Remaining": ["8"], "X-GitHub-Media-Type": ["github.cloak-preview; param=full.text-match"], "X-Runtime-rack": ["0.043624"], "Content-Security-Policy": ["default-src 'none'"], "X-Content-Type-Options": ["nosniff"], "Access-Control-Expose-Headers": ["ETag, Link, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval"], "Transfer-Encoding": ["chunked"], "X-GitHub-Request-Id": ["B258:516C:12CE5:2A7F2:5B64C902"], "Strict-Transport-Security": ["max-age=31536000; includeSubdomains; preload"], "X-XSS-Protection": ["1; mode=block"], "Server": ["GitHub.com"], "X-RateLimit-Limit": ["10"], "Cache-Control": ["no-cache"], "Date": ["Fri, 03 Aug 2018 21:28:34 GMT"], "Access-Control-Allow-Origin": ["*"], "Referrer-Policy": ["origin-when-cross-origin, strict-origin-when-cross-origin"], "Content-Type": ["application/json; charset=utf-8"], "X-Frame-Options": ["deny"], "Content-Encoding": ["gzip"], "X-RateLimit-Reset": ["1533331773"]}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/search/commits?q=css+repo%3Aoctocat%2FSpoon-Knife&per_page=100"}, "recorded_at": "2018-08-03T21:28:34"}], 
"recorded_with": "betamax/0.8.1"} \ No newline at end of file diff --git a/tests/integration/test_github.py b/tests/integration/test_github.py index b88c275f..b827194b 100644 --- a/tests/integration/test_github.py +++ b/tests/integration/test_github.py @@ -523,6 +523,27 @@ class TestGitHub(IntegrationHelper): assert isinstance(code_result, github3.search.CodeSearchResult) assert len(code_result.text_matches) > 0 + def test_search_commits(self): + """Test the ability to search for commits.""" + cassette_name = self.cassette_name('search_commits') + with self.recorder.use_cassette(cassette_name): + result_iterator = self.gh.search_commits( + 'css repo:octocat/Spoon-Knife') + commit_result = next(result_iterator) + + assert isinstance(commit_result, github3.search.CommitSearchResult) + + def test_search_commits_with_text_match(self): + """Test the ability to search for commits with text matches.""" + cassette_name = self.cassette_name('search_commits_with_text_match') + with self.recorder.use_cassette(cassette_name): + result_iterator = self.gh.search_commits( + 'css repo:octocat/Spoon-Knife', text_match=True) + commit_result = next(result_iterator) + + assert isinstance(commit_result, github3.search.CommitSearchResult) + assert len(commit_result.text_matches) > 0 + def test_search_users(self): """Test the ability to use the user search endpoint.""" cassette_name = self.cassette_name('search_users') diff --git a/tests/unit/helper.py b/tests/unit/helper.py index a38bf53e..9a819e98 100644 --- a/tests/unit/helper.py +++ b/tests/unit/helper.py @@ -241,6 +241,24 @@ class UnitIteratorHelper(UnitHelper): self.get_json_mock.stop() +class UnitSearchIteratorHelper(UnitIteratorHelper): + + """Base class for search iterator based unit tests.""" + + def patch_get_json(self): + """Patch a SearchIterator's _get_json method.""" + self.get_json_mock = mock.patch.object( + github3.structs.SearchIterator, '_get_json' + ) + self.patched_get_json = self.get_json_mock.start() + 
self.patched_get_json.return_value = [] + + def setUp(self): + """Use UnitIteratorHelper's setUp and patch _get_json.""" + super(UnitSearchIteratorHelper, self).setUp() + self.patch_get_json() + + class UnitRequiresAuthenticationHelper(UnitHelper): """Helper for unit tests that demonstrate authentication is required.""" diff --git a/tests/unit/test_github.py b/tests/unit/test_github.py index c4af3a6f..44e97243 100644 --- a/tests/unit/test_github.py +++ b/tests/unit/test_github.py @@ -1295,6 +1295,85 @@ class TestGitHubIterators(helper.UnitIteratorHelper): ) +class TestGitHubSearchIterators(helper.UnitSearchIteratorHelper): + + """Test GitHub methods that return search iterators.""" + + described_class = GitHub + example_data = None + + def test_search_code(self): + """Verify the request to search for code.""" + i = self.instance.search_code( + 'addClass in:file language:js repo:jquery/jquery') + self.get_next(i) + + self.session.get.assert_called_once_with( + url_for('search/code'), + params={'per_page': 100, + 'q': 'addClass in:file language:js repo:jquery/jquery'}, + headers={} + ) + + def test_search_commits(self): + """Verify the request to search for commits.""" + i = self.instance.search_commits( + 'css repo:octocat/Spoon-Knife') + self.get_next(i) + + self.session.get.assert_called_once_with( + url_for('search/commits'), + params={'per_page': 100, + 'q': 'css repo:octocat/Spoon-Knife'}, + headers={'Accept': 'application/vnd.github.cloak-preview'} + ) + + def test_search_issues(self): + """Verify the request to search for issues.""" + i = self.instance.search_issues( + 'windows label:bug language:python state:open', + sort='created', order='asc') + self.get_next(i) + + self.session.get.assert_called_once_with( + url_for('search/issues'), + params={'order': 'asc', + 'per_page': 100, + 'q': 'windows label:bug language:python state:open', + 'sort': 'created'}, + headers={} + ) + + def test_search_repositories(self): + """Verify the request to search for 
repositories.""" + i = self.instance.search_repositories( + 'tetris language:assembly', + sort='stars', order='asc') + self.get_next(i) + + self.session.get.assert_called_once_with( + url_for('search/repositories'), + params={'order': 'asc', + 'per_page': 100, + 'q': 'tetris language:assembly', + 'sort': 'stars'}, + headers={} + ) + + def test_search_users(self): + """Verify the request to search for users.""" + i = self.instance.search_users( + 'tom repos:>42 followers:>1000') + self.get_next(i) + + self.session.get.assert_called_once_with( + url_for('search/users'), + params={'per_page': 100, + 'q': 'tom repos:>42 followers:>1000'}, + headers={} + ) + + class TestGitHubRequiresAuthentication( helper.UnitRequiresAuthenticationHelper):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 2 }
1.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist", "betamax", "betamax_matchers" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": [ "dev-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 betamax==0.8.1 betamax-matchers==0.4.0 certifi==2021.5.30 charset-normalizer==2.0.12 distlib==0.3.9 execnet==1.9.0 filelock==3.4.1 -e git+https://github.com/sigmavirus24/github3.py.git@37b3cf99960523256d42c2ba512c11c74731123b#egg=github3.py idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 mock==1.0.1 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 pytest-xdist==3.0.2 python-dateutil==2.9.0.post0 requests==2.27.1 requests-toolbelt==1.0.0 six==1.17.0 swebench-matterhorn @ file:///swebench_matterhorn toml==0.10.2 tomli==1.2.3 tox==3.28.0 typing_extensions==4.1.1 uritemplate==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 zipp==3.6.0
name: github3.py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - betamax==0.8.1 - betamax-matchers==0.4.0 - charset-normalizer==2.0.12 - distlib==0.3.9 - execnet==1.9.0 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - mock==1.0.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-xdist==3.0.2 - python-dateutil==2.9.0.post0 - requests==2.27.1 - requests-toolbelt==1.0.0 - six==1.17.0 - swebench-matterhorn==0.0.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typing-extensions==4.1.1 - uritemplate==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - wheel==0.21.0 - zipp==3.6.0 prefix: /opt/conda/envs/github3.py
[ "tests/integration/test_github.py::TestGitHub::test_search_commits", "tests/integration/test_github.py::TestGitHub::test_search_commits_with_text_match", "tests/unit/test_github.py::TestGitHubSearchIterators::test_search_commits" ]
[]
[ "tests/integration/test_github.py::TestGitHub::test_activate_membership", "tests/integration/test_github.py::TestGitHub::test_add_email_addresses", "tests/integration/test_github.py::TestGitHub::test_all_events", "tests/integration/test_github.py::TestGitHub::test_all_organizations", "tests/integration/test_github.py::TestGitHub::test_all_repositories", "tests/integration/test_github.py::TestGitHub::test_all_users", "tests/integration/test_github.py::TestGitHub::test_authorize", "tests/integration/test_github.py::TestGitHub::test_create_gist", "tests/integration/test_github.py::TestGitHub::test_create_issue", "tests/integration/test_github.py::TestGitHub::test_create_issue_multiple_assignees", "tests/integration/test_github.py::TestGitHub::test_create_key", "tests/integration/test_github.py::TestGitHub::test_create_repository", "tests/integration/test_github.py::TestGitHub::test_delete_email_addresses", "tests/integration/test_github.py::TestGitHub::test_emojis", "tests/integration/test_github.py::TestGitHub::test_emojis_etag", "tests/integration/test_github.py::TestGitHub::test_feeds", "tests/integration/test_github.py::TestGitHub::test_followers", "tests/integration/test_github.py::TestGitHub::test_followers_of", "tests/integration/test_github.py::TestGitHub::test_gist", "tests/integration/test_github.py::TestGitHub::test_gitignore_template", "tests/integration/test_github.py::TestGitHub::test_gitignore_templates", "tests/integration/test_github.py::TestGitHub::test_is_following", "tests/integration/test_github.py::TestGitHub::test_is_starred", "tests/integration/test_github.py::TestGitHub::test_issue", "tests/integration/test_github.py::TestGitHub::test_key", "tests/integration/test_github.py::TestGitHub::test_license", "tests/integration/test_github.py::TestGitHub::test_licenses", "tests/integration/test_github.py::TestGitHub::test_markdown", "tests/integration/test_github.py::TestGitHub::test_me", "tests/integration/test_github.py::TestGitHub::test_meta", 
"tests/integration/test_github.py::TestGitHub::test_non_existent_gitignore_template", "tests/integration/test_github.py::TestGitHub::test_notifications", "tests/integration/test_github.py::TestGitHub::test_notifications_all", "tests/integration/test_github.py::TestGitHub::test_octocat", "tests/integration/test_github.py::TestGitHub::test_organization", "tests/integration/test_github.py::TestGitHub::test_project", "tests/integration/test_github.py::TestGitHub::test_project_card", "tests/integration/test_github.py::TestGitHub::test_project_column", "tests/integration/test_github.py::TestGitHub::test_public_gists", "tests/integration/test_github.py::TestGitHub::test_pubsubhubbub", "tests/integration/test_github.py::TestGitHub::test_pull_request", "tests/integration/test_github.py::TestGitHub::test_rate_limit", "tests/integration/test_github.py::TestGitHub::test_repositories", "tests/integration/test_github.py::TestGitHub::test_repositories_by", "tests/integration/test_github.py::TestGitHub::test_repository", "tests/integration/test_github.py::TestGitHub::test_repository_invitations", "tests/integration/test_github.py::TestGitHub::test_repository_with_id", "tests/integration/test_github.py::TestGitHub::test_search_code", "tests/integration/test_github.py::TestGitHub::test_search_code_with_text_match", "tests/integration/test_github.py::TestGitHub::test_search_issues", "tests/integration/test_github.py::TestGitHub::test_search_repositories", "tests/integration/test_github.py::TestGitHub::test_search_repositories_with_text_match", "tests/integration/test_github.py::TestGitHub::test_search_users", "tests/integration/test_github.py::TestGitHub::test_search_users_with_text_match", "tests/integration/test_github.py::TestGitHub::test_star", "tests/integration/test_github.py::TestGitHub::test_unfollow", "tests/integration/test_github.py::TestGitHub::test_unstar", "tests/integration/test_github.py::TestGitHub::test_update_me", 
"tests/integration/test_github.py::TestGitHub::test_user", "tests/integration/test_github.py::TestGitHub::test_user_teams", "tests/integration/test_github.py::TestGitHub::test_user_with_id", "tests/integration/test_github.py::TestGitHub::test_zen", "tests/integration/test_github.py::TestGitHubEnterprise::test_admin_stats", "tests/integration/test_github.py::TestGitHubStatus::test_api", "tests/integration/test_github.py::TestGitHubStatus::test_last_message", "tests/integration/test_github.py::TestGitHubStatus::test_messages", "tests/integration/test_github.py::TestGitHubStatus::test_status", "tests/unit/test_github.py::TestGitHub::test_activate_membership", "tests/unit/test_github.py::TestGitHub::test_add_email_addresses", "tests/unit/test_github.py::TestGitHub::test_add_email_addresses_with_empty_list", "tests/unit/test_github.py::TestGitHub::test_authorization", "tests/unit/test_github.py::TestGitHub::test_authorize", "tests/unit/test_github.py::TestGitHub::test_authorize_without_scope", "tests/unit/test_github.py::TestGitHub::test_can_login_without_two_factor_callback", "tests/unit/test_github.py::TestGitHub::test_check_authorization", "tests/unit/test_github.py::TestGitHub::test_create_gist", "tests/unit/test_github.py::TestGitHub::test_create_key", "tests/unit/test_github.py::TestGitHub::test_create_key_requires_a_key", "tests/unit/test_github.py::TestGitHub::test_create_key_requires_a_title", "tests/unit/test_github.py::TestGitHub::test_create_key_with_readonly", "tests/unit/test_github.py::TestGitHub::test_create_repository", "tests/unit/test_github.py::TestGitHub::test_delete_email_addresses", "tests/unit/test_github.py::TestGitHub::test_emojis", "tests/unit/test_github.py::TestGitHub::test_feeds", "tests/unit/test_github.py::TestGitHub::test_follow", "tests/unit/test_github.py::TestGitHub::test_follow_requires_a_username", "tests/unit/test_github.py::TestGitHub::test_gist", "tests/unit/test_github.py::TestGitHub::test_gitignore_template", 
"tests/unit/test_github.py::TestGitHub::test_gitignore_templates", "tests/unit/test_github.py::TestGitHub::test_is_following", "tests/unit/test_github.py::TestGitHub::test_is_starred", "tests/unit/test_github.py::TestGitHub::test_is_starred_requires_a_repo", "tests/unit/test_github.py::TestGitHub::test_is_starred_requires_an_owner", "tests/unit/test_github.py::TestGitHub::test_issue", "tests/unit/test_github.py::TestGitHub::test_issue_requires_positive_issue_id", "tests/unit/test_github.py::TestGitHub::test_issue_requires_repository", "tests/unit/test_github.py::TestGitHub::test_issue_requires_username", "tests/unit/test_github.py::TestGitHub::test_key", "tests/unit/test_github.py::TestGitHub::test_key_negative_id", "tests/unit/test_github.py::TestGitHub::test_license", "tests/unit/test_github.py::TestGitHub::test_login", "tests/unit/test_github.py::TestGitHub::test_login_with_token", "tests/unit/test_github.py::TestGitHub::test_markdown", "tests/unit/test_github.py::TestGitHub::test_markdown_gfm", "tests/unit/test_github.py::TestGitHub::test_markdown_raw", "tests/unit/test_github.py::TestGitHub::test_me", "tests/unit/test_github.py::TestGitHub::test_meta", "tests/unit/test_github.py::TestGitHub::test_octocat", "tests/unit/test_github.py::TestGitHub::test_octocat_response_not_ok", "tests/unit/test_github.py::TestGitHub::test_organization", "tests/unit/test_github.py::TestGitHub::test_project", "tests/unit/test_github.py::TestGitHub::test_project_card", "tests/unit/test_github.py::TestGitHub::test_project_column", "tests/unit/test_github.py::TestGitHub::test_pubsubhubbub", "tests/unit/test_github.py::TestGitHub::test_pubsubhubbub_invalid_username", "tests/unit/test_github.py::TestGitHub::test_pubsubhubbub_required_callback", "tests/unit/test_github.py::TestGitHub::test_pubsubhubbub_required_mode", "tests/unit/test_github.py::TestGitHub::test_pubsubhubbub_required_topic", "tests/unit/test_github.py::TestGitHub::test_pubsubhubbub_secret", 
"tests/unit/test_github.py::TestGitHub::test_pubsubhubbub_topic_no_match", "tests/unit/test_github.py::TestGitHub::test_pubsubhubbub_valid_username", "tests/unit/test_github.py::TestGitHub::test_pull_request", "tests/unit/test_github.py::TestGitHub::test_pull_request_negative_id", "tests/unit/test_github.py::TestGitHub::test_repository", "tests/unit/test_github.py::TestGitHub::test_repository_with_id", "tests/unit/test_github.py::TestGitHub::test_repository_with_id_accepts_a_string", "tests/unit/test_github.py::TestGitHub::test_repository_with_id_requires_a_positive_id", "tests/unit/test_github.py::TestGitHub::test_repository_with_invalid_repo", "tests/unit/test_github.py::TestGitHub::test_repository_with_invalid_user", "tests/unit/test_github.py::TestGitHub::test_repository_with_invalid_user_and_repo", "tests/unit/test_github.py::TestGitHub::test_set_client_id", "tests/unit/test_github.py::TestGitHub::test_set_user_agent", "tests/unit/test_github.py::TestGitHub::test_set_user_agent_required_user_agent", "tests/unit/test_github.py::TestGitHub::test_star", "tests/unit/test_github.py::TestGitHub::test_star_required_repo", "tests/unit/test_github.py::TestGitHub::test_star_required_username", "tests/unit/test_github.py::TestGitHub::test_star_required_username_and_repo", "tests/unit/test_github.py::TestGitHub::test_two_factor_login", "tests/unit/test_github.py::TestGitHub::test_unfollow", "tests/unit/test_github.py::TestGitHub::test_unfollow_required_username", "tests/unit/test_github.py::TestGitHub::test_unstar", "tests/unit/test_github.py::TestGitHub::test_unstar_requires_repository", "tests/unit/test_github.py::TestGitHub::test_unstar_requires_username", "tests/unit/test_github.py::TestGitHub::test_unstar_requires_username_and_repository", "tests/unit/test_github.py::TestGitHub::test_update_me", "tests/unit/test_github.py::TestGitHub::test_user", "tests/unit/test_github.py::TestGitHub::test_user_with_id", 
"tests/unit/test_github.py::TestGitHub::test_user_with_id_accepts_a_string", "tests/unit/test_github.py::TestGitHub::test_user_with_id_requires_a_positive_id", "tests/unit/test_github.py::TestGitHub::test_zen", "tests/unit/test_github.py::TestGitHubIterators::test_all_events", "tests/unit/test_github.py::TestGitHubIterators::test_all_organizations", "tests/unit/test_github.py::TestGitHubIterators::test_all_organizations_per_page", "tests/unit/test_github.py::TestGitHubIterators::test_all_organizations_since", "tests/unit/test_github.py::TestGitHubIterators::test_all_repositories", "tests/unit/test_github.py::TestGitHubIterators::test_all_repositories_per_page", "tests/unit/test_github.py::TestGitHubIterators::test_all_repositories_since", "tests/unit/test_github.py::TestGitHubIterators::test_all_users", "tests/unit/test_github.py::TestGitHubIterators::test_all_users_per_page", "tests/unit/test_github.py::TestGitHubIterators::test_all_users_since", "tests/unit/test_github.py::TestGitHubIterators::test_authorizations", "tests/unit/test_github.py::TestGitHubIterators::test_emails", "tests/unit/test_github.py::TestGitHubIterators::test_followed_by", "tests/unit/test_github.py::TestGitHubIterators::test_followers", "tests/unit/test_github.py::TestGitHubIterators::test_followers_of", "tests/unit/test_github.py::TestGitHubIterators::test_followers_require_auth", "tests/unit/test_github.py::TestGitHubIterators::test_following", "tests/unit/test_github.py::TestGitHubIterators::test_following_require_auth", "tests/unit/test_github.py::TestGitHubIterators::test_gists", "tests/unit/test_github.py::TestGitHubIterators::test_gists_by", "tests/unit/test_github.py::TestGitHubIterators::test_issues", "tests/unit/test_github.py::TestGitHubIterators::test_issues_on", "tests/unit/test_github.py::TestGitHubIterators::test_issues_on_with_params", "tests/unit/test_github.py::TestGitHubIterators::test_issues_with_params", "tests/unit/test_github.py::TestGitHubIterators::test_keys", 
"tests/unit/test_github.py::TestGitHubIterators::test_licenses", "tests/unit/test_github.py::TestGitHubIterators::test_notifications", "tests/unit/test_github.py::TestGitHubIterators::test_notifications_all", "tests/unit/test_github.py::TestGitHubIterators::test_notifications_participating_in", "tests/unit/test_github.py::TestGitHubIterators::test_organization_issues", "tests/unit/test_github.py::TestGitHubIterators::test_organization_issues_with_params", "tests/unit/test_github.py::TestGitHubIterators::test_organizations", "tests/unit/test_github.py::TestGitHubIterators::test_organizations_with", "tests/unit/test_github.py::TestGitHubIterators::test_public_gists", "tests/unit/test_github.py::TestGitHubIterators::test_public_gists_since", "tests/unit/test_github.py::TestGitHubIterators::test_repositories_by", "tests/unit/test_github.py::TestGitHubIterators::test_repositories_by_with_type", "tests/unit/test_github.py::TestGitHubIterators::test_repository_invitations", "tests/unit/test_github.py::TestGitHubIterators::test_respositories", "tests/unit/test_github.py::TestGitHubIterators::test_respositories_accepts_params", "tests/unit/test_github.py::TestGitHubIterators::test_starred", "tests/unit/test_github.py::TestGitHubIterators::test_starred_by", "tests/unit/test_github.py::TestGitHubIterators::test_subscriptions", "tests/unit/test_github.py::TestGitHubIterators::test_subscriptions_for", "tests/unit/test_github.py::TestGitHubIterators::test_user_issues", "tests/unit/test_github.py::TestGitHubIterators::test_user_issues_with_parameters", "tests/unit/test_github.py::TestGitHubSearchIterators::test_search_code", "tests/unit/test_github.py::TestGitHubSearchIterators::test_search_issues", "tests/unit/test_github.py::TestGitHubSearchIterators::test_search_repositories", "tests/unit/test_github.py::TestGitHubSearchIterators::test_search_users", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_activate_membership", 
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_add_email_addresses", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_authorization", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_authorizations", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_create_issue", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_create_key", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_create_repository", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_delete_email_addresses", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_emails", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_feeds", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_follow", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_gists", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_is_following", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_is_starred", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_issues", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_key", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_keys", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_me", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_notifications", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_organization_issues", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_organizations", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_pubsubhubbub", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_repositories", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_repository_invitations", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_star", 
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_starred", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_unfollow", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_unstar", "tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_user_issues", "tests/unit/test_github.py::TestGitHubAuthorizations::test_revoke_authorization", "tests/unit/test_github.py::TestGitHubAuthorizations::test_revoke_authorizations", "tests/unit/test_github.py::TestGitHubEnterprise::test_admin_stats", "tests/unit/test_github.py::TestGitHubEnterprise::test_str", "tests/unit/test_github.py::TestGitHubStatus::test_api", "tests/unit/test_github.py::TestGitHubStatus::test_last_message", "tests/unit/test_github.py::TestGitHubStatus::test_messages", "tests/unit/test_github.py::TestGitHubStatus::test_status" ]
[]
BSD 3-Clause "New" or "Revised" License
2,869
[ "src/github3/search/commit.py", "src/github3/search/__init__.py", "src/github3/github.py" ]
[ "src/github3/search/commit.py", "src/github3/search/__init__.py", "src/github3/github.py" ]