instance_id
stringlengths 10
57
| patch
stringlengths 261
37.7k
| repo
stringlengths 7
53
| base_commit
stringlengths 40
40
| hints_text
stringclasses 301
values | test_patch
stringlengths 212
2.22M
| problem_statement
stringlengths 23
37.7k
| version
stringclasses 1
value | environment_setup_commit
stringlengths 40
40
| FAIL_TO_PASS
listlengths 1
4.94k
| PASS_TO_PASS
listlengths 0
7.82k
| meta
dict | created_at
stringlengths 25
25
| license
stringclasses 8
values | __index_level_0__
int64 0
6.41k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
oxan__djangorestframework-dataclasses-80
|
diff --git a/rest_framework_dataclasses/serializers.py b/rest_framework_dataclasses/serializers.py
index b046fd0..7e5c5fd 100644
--- a/rest_framework_dataclasses/serializers.py
+++ b/rest_framework_dataclasses/serializers.py
@@ -31,6 +31,18 @@ T = TypeVar('T', bound=Dataclass)
AnyT = TypeVar('AnyT')
+# Helper function create a dataclass instance
+def _create_instance(dataclass_type: Type[T], fields_map: Dict[str, dataclasses.Field[Any]], values: KWArgs) -> T:
+ # Aggregate fields by whether they must (or, at minimum can) be supplied to the constructor, or they can't be.
+ init_params = {name: value for name, value in values.items() if fields_map[name].init}
+ set_params = {name: value for name, value in values.items() if not fields_map[name].init}
+
+ instance = dataclass_type(**init_params)
+ for name, value in set_params.items():
+ setattr(instance, name, value)
+ return instance
+
+
# Helper function to strip the empty sentinel value and replace it with the default value from a dataclass
def _strip_empty_sentinels(data: AnyT, instance: Optional[AnyT] = None) -> AnyT:
if dataclasses.is_dataclass(data) and not isinstance(data, type):
@@ -43,7 +55,7 @@ def _strip_empty_sentinels(data: AnyT, instance: Optional[AnyT] = None) -> AnyT:
setattr(instance, field, value)
return instance
else:
- return cast(AnyT, type(data)(**values))
+ return cast(AnyT, _create_instance(type(data), {f.name: f for f in dataclasses.fields(data)}, values))
elif isinstance(data, list):
return cast(AnyT, [_strip_empty_sentinels(item) for item in data])
elif isinstance(data, dict):
@@ -189,14 +201,16 @@ class DataclassSerializer(rest_framework.serializers.Serializer, Generic[T]):
"inspect 'serializer.validated_data' instead. "
)
- # Explicitly use internal validated_data here, as we want the empty sentinel values instead of the normalized
- # external representation.
- validated_data = dataclasses.replace(self._validated_data, **kwargs)
+ # Explicitly use the internal validated_data here, as the empty sentinel values must not be stripped yet. Do not
+ # use dataclasses.replace(), as it doesn't handle non-init fields properly.
+ obj = copy.deepcopy(self._validated_data)
+ for field, value in kwargs.items():
+ setattr(obj, field, value)
if self.instance is not None:
- self.instance = self.update(self.instance, validated_data)
+ self.instance = self.update(self.instance, obj)
else:
- self.instance = self.create(validated_data)
+ self.instance = self.create(obj)
assert self.instance is not None, (
'`update()` or `create()` did not return an object instance.'
@@ -629,13 +643,13 @@ class DataclassSerializer(rest_framework.serializers.Serializer, Generic[T]):
# Only insert empty sentinel value for non-supplied values when the root serializer is in partial mode, to
# prevent them from showing up otherwise.
if self.root.partial:
- empty_values = {key: empty for key in self.dataclass_definition.fields.keys() if key not in native_values}
- else:
- empty_values = {}
-
- dataclass_type = self.dataclass_definition.dataclass_type
- instance = dataclass_type(**native_values, **empty_values)
+ native_values.update({key: empty
+ for key, field in self.dataclass_definition.fields.items()
+ if key not in native_values and field.init})
+ instance = _create_instance(self.dataclass_definition.dataclass_type,
+ self.dataclass_definition.fields,
+ native_values)
return cast(T, instance)
@cached_property
|
oxan/djangorestframework-dataclasses
|
69e4a91c12e98349c6458511c648b256d4c4e745
|
diff --git a/tests/test_functional.py b/tests/test_functional.py
index ab6abe4..1c9655b 100644
--- a/tests/test_functional.py
+++ b/tests/test_functional.py
@@ -71,6 +71,16 @@ class PersonSerializer(DataclassSerializer):
}
[email protected]
+class Obscure:
+ name: str = dataclasses.field(init=False)
+
+
+class ObscureSerializer(DataclassSerializer):
+ class Meta:
+ dataclass = Obscure
+
+
# noinspection PyUnresolvedReferences
class FunctionalTestMixin:
def test_serialize(self):
@@ -205,3 +215,15 @@ class PartialPersonTest(TestCase):
self.assertIs(output_instance, input_instance)
self.assertEqual(output_instance, expected_output)
+
+
+class ObscureFeaturesTest(TestCase, FunctionalTestMixin):
+ serializer = ObscureSerializer
+ instance = Obscure()
+ representation = {
+ 'name': 'Bob'
+ }
+ representation_readonly = {}
+
+ def setUp(self):
+ self.instance.name = 'Bob'
diff --git a/tests/test_issues.py b/tests/test_issues.py
index 6a96684..e6a6945 100644
--- a/tests/test_issues.py
+++ b/tests/test_issues.py
@@ -102,3 +102,17 @@ class IssuesTest(TestCase):
serializer.is_valid(raise_exception=True)
self.assertEqual(serializer.validated_data['foo'].value, 'default')
+
+ # Issue #71: Deserialization fails for dataclasses with non-init fields
+ def test_noninit_fields(self):
+ @dataclasses.dataclass
+ class A:
+ foo: str
+ bar: str = dataclasses.field(init=False)
+
+ serializer = DataclassSerializer(dataclass=A, data={'foo': 'abc', 'bar': 'def'})
+ serializer.is_valid(raise_exception=True)
+ instance = serializer.save()
+
+ self.assertEqual(instance.foo, 'abc')
+ self.assertEqual(instance.bar, 'def')
|
Serializer initializing dataclass with non init fields
Hi there!
I have a situation here, that I think might be a bug, or maybe I just haven't found a way to solve it since I'm still new with the package.
Example:
```python
from dataclasses import dataclass, field
@dataclass
class A:
foo: str
bar: str = field(init=False)
from rest_framework_dataclasses.serializers import DataclassSerializer
class ASerializer(DataclassSerializer):
class Meta:
dataclass = A
```
The previous code (adapted and simplified from my real code), raises the exception when trying to validate:
`TypeError: A.__init__() got an unexpected keyword argument 'bar'`
Proposal:
In the following [line of code](https://github.com/oxan/djangorestframework-dataclasses/blob/24a94994b1b6c20939619070b5709f18c598d2ee/rest_framework_dataclasses/serializers.py#L619), where the dataclass is being instantiated, it might be a good idea to exclude from the `empty_values`, those fields with `init=False`.
|
0.0
|
69e4a91c12e98349c6458511c648b256d4c4e745
|
[
"tests/test_functional.py::ObscureFeaturesTest::test_create",
"tests/test_functional.py::ObscureFeaturesTest::test_update",
"tests/test_functional.py::ObscureFeaturesTest::test_validated_data",
"tests/test_issues.py::IssuesTest::test_noninit_fields"
] |
[
"tests/test_functional.py::PetTest::test_create",
"tests/test_functional.py::PetTest::test_serialize",
"tests/test_functional.py::PetTest::test_update",
"tests/test_functional.py::PetTest::test_validated_data",
"tests/test_functional.py::EmptyPersonTest::test_create",
"tests/test_functional.py::EmptyPersonTest::test_serialize",
"tests/test_functional.py::EmptyPersonTest::test_update",
"tests/test_functional.py::EmptyPersonTest::test_validated_data",
"tests/test_functional.py::PartialPersonTest::test_update",
"tests/test_functional.py::ObscureFeaturesTest::test_serialize",
"tests/test_issues.py::IssuesTest::test_create_source",
"tests/test_issues.py::IssuesTest::test_empty_sentinel_nesting",
"tests/test_issues.py::IssuesTest::test_forward_reference_list",
"tests/test_issues.py::IssuesTest::test_many_empty",
"tests/test_issues.py::IssuesTest::test_nested_list",
"tests/test_issues.py::IssuesTest::test_nested_nullable",
"tests/test_issues.py::IssuesTest::test_save_nested_dataclass"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-28 16:50:25+00:00
|
bsd-3-clause
| 4,435 |
|
paambaati__mae-2
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b188730..a12be4c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@
`mae` follows [Semantic Versioning](http://semver.org/).
+## [1.0.7] - 2019-02-28
+### Fixed
+- 🐞 Fixed a bug ([#1](https://github.com/paambaati/mae/issues/1)) where an app without labels can cause an empty response.
+
## [1.0.6] - 2018-12-31
### Added
- Tests for CLI.
diff --git a/mae/__init__.py b/mae/__init__.py
index d2c89c9..a89ccfb 100644
--- a/mae/__init__.py
+++ b/mae/__init__.py
@@ -1,5 +1,5 @@
-__name__ = 'mae'
-__version__ = '1.0.6'
+__name__ = 'Mesos-App-Exporter'
+__version__ = '1.0.7'
__author__ = 'GP'
__maintainer__ = 'GP'
__email__ = '[email protected]'
diff --git a/mae/mae.py b/mae/mae.py
index 8626e9f..a333b5b 100644
--- a/mae/mae.py
+++ b/mae/mae.py
@@ -45,7 +45,7 @@ class MesosAppExporter:
app_metrics_endpoints = list()
for index, task in enumerate(tasks):
- labels = task.get('labels').get('labels')
+ labels = task.get('labels', {}).get('labels', {})
label_dict = dict()
for label in labels:
label_dict.update({label.get('key'): label.get('value')})
|
paambaati/mae
|
ab1eadba7bec0c47199827dfae5bfa57a6ac94b1
|
diff --git a/tests/fixtures/task_data_4.json b/tests/fixtures/task_data_4.json
new file mode 100644
index 0000000..0e5bd6c
--- /dev/null
+++ b/tests/fixtures/task_data_4.json
@@ -0,0 +1,99 @@
+{
+ "get_tasks": {
+ "launched_tasks": [
+ {
+ "task_id": {
+ "value": "production.labelless-app.36dec447-0567-11e9-a0d5-12164c467f0e"
+ },
+ "name": "production.labelless-app",
+ "statuses": [
+ {
+ "executor_id": {
+ "value": "production.labelless-app.36dec447-0567-11e9-a0d5-12164c467f0e"
+ },
+ "uuid": "nPcAC+mhSnKooebv+6SnfA==",
+ "task_id": {
+ "value": "production.labelless-app.36dec447-0567-11e9-a0d5-12164c467f0e"
+ },
+ "healthy": true,
+ "timestamp": 1545427722.56431,
+ "container_status": {
+ "executor_pid": 7277,
+ "network_infos": [
+ {
+ "ip_addresses": [
+ {
+ "ip_address": "10.181.9.9"
+ }
+ ]
+ }
+ ]
+ },
+ "source": "SOURCE_EXECUTOR",
+ "state": "TASK_RUNNING",
+ "agent_id": {
+ "value": "d59f7ce8-7b0a-48e1-a71d-81ddad9cfa8c-S461"
+ }
+ }
+ ],
+ "status_update_state": "TASK_RUNNING",
+ "status_update_uuid": "nPcAC+mhSnKooebv+6SnfA==",
+ "framework_id": {
+ "value": "545b613d-52cc-4f5f-922c-4dacc444e544-0000"
+ },
+ "state": "TASK_RUNNING",
+ "agent_id": {
+ "value": "d59f7ce8-7b0a-48e1-a71d-81ddad9cfa8c-S461"
+ },
+ "resources": [
+ {
+ "type": "SCALAR",
+ "scalar": {
+ "value": 2.0
+ },
+ "role": "*",
+ "name": "cpus"
+ },
+ {
+ "type": "SCALAR",
+ "scalar": {
+ "value": 14000.0
+ },
+ "role": "*",
+ "name": "mem"
+ },
+ {
+ "ranges": {
+ "range": [
+ {
+ "begin": 31064,
+ "end": 31065
+ }
+ ]
+ },
+ "type": "RANGES",
+ "role": "*",
+ "name": "ports"
+ }
+ ],
+ "discovery": {
+ "name": "production.labelless-app",
+ "visibility": "FRAMEWORK",
+ "ports": {
+ "ports": [
+ {
+ "protocol": "tcp",
+ "number": 31064
+ },
+ {
+ "protocol": "tcp",
+ "number": 31065
+ }
+ ]
+ }
+ }
+ }
+ ]
+ },
+ "type": "GET_TASKS"
+}
\ No newline at end of file
diff --git a/tests/test_mae.py b/tests/test_mae.py
index f421c40..e8a34c0 100644
--- a/tests/test_mae.py
+++ b/tests/test_mae.py
@@ -71,6 +71,14 @@ class TestMesosAppExporter(unittest.TestCase):
endpoints = self.exporter.get_app_metrics_endpoints(task_data)
self.assertEqual(len(endpoints), 0)
+ def test_mae_get_endpoint_4_no_labels(self):
+ """
+ Test if task data without any labels do not throw any errors.
+ """
+ task_data = self.__readJsonFile('fixtures/task_data_4.json')
+ endpoints = self.exporter.get_app_metrics_endpoints(task_data)
+ self.assertEqual(len(endpoints), 0)
+
@patch('requests.get')
def test_get_metrics_1_single_app(self, mock_request):
"""
|
An app without any labels can crash mae's response
When the app definition does not have any labels, the response is empty. This can fixed by adding defaults to the `get()` calls here —
https://github.com/paambaati/mae/blob/ab1eadba7bec0c47199827dfae5bfa57a6ac94b1/mae/mae.py#L48
|
0.0
|
ab1eadba7bec0c47199827dfae5bfa57a6ac94b1
|
[
"tests/test_mae.py::TestMesosAppExporter::test_mae_get_endpoint_4_no_labels"
] |
[
"tests/test_mae.py::TestMesosAppExporter::test_get_metrics_1_single_app",
"tests/test_mae.py::TestMesosAppExporter::test_get_metrics_2_multiple_apps",
"tests/test_mae.py::TestMesosAppExporter::test_mae_get_endpoint_1_single_task",
"tests/test_mae.py::TestMesosAppExporter::test_mae_get_endpoint_2_multiple_tasks",
"tests/test_mae.py::TestMesosAppExporter::test_mae_get_endpoint_3_no_matching_tasks"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-28 06:26:10+00:00
|
mit
| 4,436 |
|
package-url__packageurl-python-29
|
diff --git a/src/packageurl/__init__.py b/src/packageurl/__init__.py
index 71225e9..ae6e148 100644
--- a/src/packageurl/__init__.py
+++ b/src/packageurl/__init__.py
@@ -282,6 +282,9 @@ class PackageURL(namedtuple('PackageURL', _components)):
def __str__(self, *args, **kwargs):
return self.to_string()
+ def __hash__(self):
+ return hash(self.to_string())
+
def to_dict(self, encode=False):
"""
Return an ordered dict of purl components as {key: value}. If `encode`
|
package-url/packageurl-python
|
a762fbd361a253e3c12f7fff61c305b6a69d0ed8
|
diff --git a/tests/test_packageurl.py b/tests/test_packageurl.py
index 912b380..6081912 100644
--- a/tests/test_packageurl.py
+++ b/tests/test_packageurl.py
@@ -286,3 +286,9 @@ class NormalizePurlTest(unittest.TestCase):
('subpath', u'this/is/a/path')
])
assert expected == purl.to_dict(encode=True)
+
+
+def test_purl_is_hashable():
+ s = {PackageURL(name='hashable', type='pypi')}
+ assert len(s) == 1
+
|
PackageURL type is not hashable
Storing `PackageURL` instances in a set can be useful. Currently attempting this fails with the following error:
```
Python 3.8.2 (default, Feb 26 2020, 22:21:03)
[GCC 9.2.1 20200130] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from packageurl import PackageURL
>>> {PackageURL(name='test', type='pypi')}
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unhashable type: 'collections.OrderedDict'
```
|
0.0
|
a762fbd361a253e3c12f7fff61c305b6a69d0ed8
|
[
"tests/test_packageurl.py::test_purl_is_hashable"
] |
[
"tests/test_packageurl.py::test_packageurl.python_safe_name",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_basic_valid_maven_purl_without_version",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_bitbucket_namespace_and_name_should_be_lowercased",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_debian_can_use_qualifiers",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_docker_uses_qualifiers_and_hash_image_id_as_versions",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_double_slash_after_scheme_is_not_significant",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_github_namespace_and_name_should_be_lowercased",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_is_invalid_a_name_is_required",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_is_invalid_a_scheme_is_always_required",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_is_invalid_a_type_is_always_required",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_is_invalid_checks_for_invalid_qualifier_keys",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_java_gem_can_use_a_qualifier",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_maven_can_come_with_a_type_qualifier",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_maven_often_uses_qualifiers",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_maven_pom_reference",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_npm_can_be_scoped",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_nuget_names_are_case_sensitive",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_pypi_names_have_special_rules_and_not_case_sensitive",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_rpm_often_use_qualifiers",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_slash_after_scheme_is_not_significant",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_slash_after_type_is_not_significant",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_go_purl_with_version_and_subpath",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_go_purl_without_version_and_with_subpath",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_maven_purl",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_maven_purl_containing_a_space_in_the_version_and_qualifier",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_maven_purl_with_case_sensitive_namespace_and_name",
"tests/test_packageurl.py::NormalizePurlTest::test_create_PackageURL_from_qualifiers_dict",
"tests/test_packageurl.py::NormalizePurlTest::test_create_PackageURL_from_qualifiers_string",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_decode_can_take_unicode_with_non_ascii_with_slash",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_encode_always_reencodes",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_encode_can_take_unicode_with_non_ascii_with_slash",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_qualifiers_as_dict",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_qualifiers_as_string",
"tests/test_packageurl.py::NormalizePurlTest::test_qualifiers_must_be_key_value_pairs",
"tests/test_packageurl.py::NormalizePurlTest::test_to_dict_optionally_returns_qualifiers_as_string"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-04-04 15:11:30+00:00
|
apache-2.0
| 4,437 |
|
package-url__packageurl-python-44
|
diff --git a/src/packageurl/contrib/url2purl.py b/src/packageurl/contrib/url2purl.py
index 8d5bde2..d10f4ed 100644
--- a/src/packageurl/contrib/url2purl.py
+++ b/src/packageurl/contrib/url2purl.py
@@ -343,7 +343,7 @@ def build_github_api_purl(url):
github_codeload_pattern = (
r"https?://codeload.github.com/"
- r"(?P<namespace>.+)/(?P<name>.+)/(zip|tar.gz|tar.bz2|.tgz)/v(?P<version>.+)$"
+ r"(?P<namespace>.+)/(?P<name>.+)/(zip|tar.gz|tar.bz2|.tgz)/v?(?P<version>.+)$"
)
@@ -367,17 +367,27 @@ def build_github_purl(url):
https://github.com/package-url/packageurl-js or
https://github.com/nexB/scancode-toolkit/archive/v3.1.1.zip
"""
- #https://github.com/nexB/scancode-toolkit/archive/v3.1.1.zip
- gh_pattern = r"https?://github.com/(?P<namespace>.+)/(?P<name>.+)/archive/v(?P<version>.+).(zip|tar.gz|tar.bz2|.tgz)"
- matches = re.search(gh_pattern, url)
+ # https://github.com/nexB/scancode-toolkit/archive/v3.1.1.zip
+ archive_pattern = (
+ r"https?://github.com/(?P<namespace>.+)/(?P<name>.+)"
+ r"/archive/v?(?P<version>.+).(zip|tar.gz|tar.bz2|.tgz)"
+ )
- if matches:
- return purl_from_pattern(type_='github', pattern=gh_pattern, url=url)
+ # https://github.com/pypa/get-virtualenv/raw/20.0.31/public/virtualenv.pyz
+ raw_pattern = (
+ r"https?://github.com/(?P<namespace>.+)/(?P<name>.+)"
+ r"/raw/v?(?P<version>[^/]+)/(?P<subpath>.*)$"
+ )
- segments = get_path_segments(url)
+ for pattern in [archive_pattern, raw_pattern]:
+ matches = re.search(pattern, url)
+ if matches:
+ return purl_from_pattern(type_='github', pattern=pattern, url=url)
- if segments==[]:
+ segments = get_path_segments(url)
+ if not segments:
return
+
namespace = segments[0]
name = segments[1]
version = None
|
package-url/packageurl-python
|
46116009b6815fc7c31bf4319bdb7d18f79fa066
|
diff --git a/tests/contrib/data/url2purl.json b/tests/contrib/data/url2purl.json
index 899170a..db52fff 100644
--- a/tests/contrib/data/url2purl.json
+++ b/tests/contrib/data/url2purl.json
@@ -1,5 +1,5 @@
{
- "http://central.maven.org/maven2/ant-contrib/ant-contrib/1.0b3/ant-contrib-1.0b3.jar": "pkg:maven/ant-contrib/[email protected]",
+ "http://central.maven.org/maven2/ant-contrib/ant-contrib/1.0b3/ant-contrib-1.0b3.jar": "pkg:maven/ant-contrib/[email protected]",
"http://repo1.maven.org/maven2/ant-contrib/ant-contrib/1.0b3/ant-contrib-1.0b3.jar": "pkg:maven/ant-contrib/[email protected]",
"maven-index://repo1.maven.org/ant-contrib/ant-contrib/1.0b3/ant-contrib-1.0b3.jar": "pkg:maven/ant-contrib/[email protected]",
"maven-index://repo1.maven.org/ant-contrib/ant-contrib/1.0b3/": "pkg:maven/ant-contrib/[email protected]",
@@ -13,18 +13,18 @@
"http://central.maven.org/maven2/org/apache/commons/commons-math3/3.6.1/commons-math3-3.6.1-sources.jar": "pkg:maven/org.apache.commons/[email protected]?classifier=sources",
"http://repo1.maven.org/maven2/jdbm/jdbm/0.20-dev/jdbm-0.20-dev.pom": "pkg:maven/jdbm/[email protected]?type=pom",
"http://central.maven.org/maven2/ant/ant-optional/1.5.3-1/ant-optional-1.5.3-1.jar": "pkg:maven/ant/[email protected]",
- "http://central.maven.org/maven2/ant/ant/1.5/ant-1.5.jar": "pkg:maven/ant/[email protected]",
- "http://central.maven.org/maven2/antlr/antlr/2.7.7/antlr-2.7.7.jar": "pkg:maven/antlr/[email protected]",
- "http://central.maven.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.jar": "pkg:maven/aopalliance/[email protected]",
- "http://central.maven.org/maven2/fr/opensagres/xdocreport/fr.opensagres.xdocreport.converter.docx.xwpf/1.0.5/fr.opensagres.xdocreport.converter.docx.xwpf-1.0.5.jar": "pkg:maven/fr.opensagres.xdocreport/[email protected]",
- "http://central.maven.org/maven2/org/eclipse/jetty/orbit/org.apache.jasper.glassfish/2.2.2.v201112011158/org.apache.jasper.glassfish-2.2.2.v201112011158-sources.jar": "pkg:maven/org.eclipse.jetty.orbit/[email protected]?classifier=sources",
- "http://central.maven.org/maven2/org/eclipse/jetty/orbit/org.apache.taglibs.standard.glassfish/1.2.0.v201112081803/org.apache.taglibs.standard.glassfish-1.2.0.v201112081803-sources.jar": "pkg:maven/org.eclipse.jetty.orbit/[email protected]?classifier=sources",
- "http://central.maven.org/maven2/org/springframework/security/kerberos/spring-security-kerberos-core/1.0.1.RELEASE/spring-security-kerberos-core-1.0.1.RELEASE-sources.jar": "pkg:maven/org.springframework.security.kerberos/[email protected]?classifier=sources",
- "http://central.maven.org/maven2/org/springframework/security/kerberos/spring-security-kerberos-web/1.0.1.RELEASE/spring-security-kerberos-web-1.0.1.RELEASE-sources.jar": "pkg:maven/org.springframework.security.kerberos/[email protected]?classifier=sources",
- "http://central.maven.org/maven2/xmlunit/xmlunit/1.1/xmlunit-1.1.jar": "pkg:maven/xmlunit/[email protected]",
- "http://central.maven.org/maven2/xom/xom/1.0/xom-1.0.jar": "pkg:maven/xom/[email protected]",
- "http://central.maven.org/maven2/xom/xom/1.1/xom-1.1-sources.jar": "pkg:maven/xom/[email protected]?classifier=sources",
- "http://central.maven.org/maven2/xpp3/xpp3/1.1.3.4.O/xpp3-1.1.3.4.O.jar": "pkg:maven/xpp3/[email protected]",
+ "http://central.maven.org/maven2/ant/ant/1.5/ant-1.5.jar": "pkg:maven/ant/[email protected]",
+ "http://central.maven.org/maven2/antlr/antlr/2.7.7/antlr-2.7.7.jar": "pkg:maven/antlr/[email protected]",
+ "http://central.maven.org/maven2/aopalliance/aopalliance/1.0/aopalliance-1.0.jar": "pkg:maven/aopalliance/[email protected]",
+ "http://central.maven.org/maven2/fr/opensagres/xdocreport/fr.opensagres.xdocreport.converter.docx.xwpf/1.0.5/fr.opensagres.xdocreport.converter.docx.xwpf-1.0.5.jar": "pkg:maven/fr.opensagres.xdocreport/[email protected]",
+ "http://central.maven.org/maven2/org/eclipse/jetty/orbit/org.apache.jasper.glassfish/2.2.2.v201112011158/org.apache.jasper.glassfish-2.2.2.v201112011158-sources.jar": "pkg:maven/org.eclipse.jetty.orbit/[email protected]?classifier=sources",
+ "http://central.maven.org/maven2/org/eclipse/jetty/orbit/org.apache.taglibs.standard.glassfish/1.2.0.v201112081803/org.apache.taglibs.standard.glassfish-1.2.0.v201112081803-sources.jar": "pkg:maven/org.eclipse.jetty.orbit/[email protected]?classifier=sources",
+ "http://central.maven.org/maven2/org/springframework/security/kerberos/spring-security-kerberos-core/1.0.1.RELEASE/spring-security-kerberos-core-1.0.1.RELEASE-sources.jar": "pkg:maven/org.springframework.security.kerberos/[email protected]?classifier=sources",
+ "http://central.maven.org/maven2/org/springframework/security/kerberos/spring-security-kerberos-web/1.0.1.RELEASE/spring-security-kerberos-web-1.0.1.RELEASE-sources.jar": "pkg:maven/org.springframework.security.kerberos/[email protected]?classifier=sources",
+ "http://central.maven.org/maven2/xmlunit/xmlunit/1.1/xmlunit-1.1.jar": "pkg:maven/xmlunit/[email protected]",
+ "http://central.maven.org/maven2/xom/xom/1.0/xom-1.0.jar": "pkg:maven/xom/[email protected]",
+ "http://central.maven.org/maven2/xom/xom/1.1/xom-1.1-sources.jar": "pkg:maven/xom/[email protected]?classifier=sources",
+ "http://central.maven.org/maven2/xpp3/xpp3/1.1.3.4.O/xpp3-1.1.3.4.O.jar": "pkg:maven/xpp3/[email protected]",
"http://central.maven.org/maven2/xpp3/xpp3_min/1.1.4c/xpp3_min-1.1.4c.jar": "pkg:maven/xpp3/[email protected]",
"http://central.maven.org/maven2/org/apache/zookeeper/zookeeper/3.4.6/": "pkg:maven/org.apache.zookeeper/[email protected]",
"http://central.maven.org/maven2/org/apache/zookeeper/zookeeper/3.4.6": "pkg:maven/org.apache.zookeeper/[email protected]",
@@ -62,24 +62,24 @@
"https://registry.yarnpkg.com/@invisionag%2feslint-config-ivx": "pkg:npm/%40invisionag/eslint-config-ivx",
"https://registry.npmjs.org/automatta/-/automatta-0.0.1.tgz": "pkg:npm/[email protected]",
"http://registry.npmjs.org/1to2/-/1to2-1.0.0.tgz": "pkg:npm/[email protected]",
- "http://registry.npmjs.org/abbrev/-/abbrev-1.0.9.tgz": "pkg:npm/[email protected]",
- "http://registry.npmjs.org/accepts/-/accepts-1.2.2.tgz": "pkg:npm/[email protected]",
+ "http://registry.npmjs.org/abbrev/-/abbrev-1.0.9.tgz": "pkg:npm/[email protected]",
+ "http://registry.npmjs.org/accepts/-/accepts-1.2.2.tgz": "pkg:npm/[email protected]",
"http://registry.npmjs.org/acorn/-/acorn-0.11.0.tgz": "pkg:npm/[email protected]",
- "http://registry.npmjs.org/co/-/co-4.6.0.tgz": "pkg:npm/[email protected]",
- "http://registry.npmjs.org/d/-/d-0.1.1.tgz": "pkg:npm/[email protected]",
- "http://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz": "pkg:npm/[email protected]",
- "http://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz": "pkg:npm/[email protected]",
+ "http://registry.npmjs.org/co/-/co-4.6.0.tgz": "pkg:npm/[email protected]",
+ "http://registry.npmjs.org/d/-/d-0.1.1.tgz": "pkg:npm/[email protected]",
+ "http://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz": "pkg:npm/[email protected]",
+ "http://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz": "pkg:npm/[email protected]",
"http://registry.npmjs.org/ms/-/ms-0.7.1.tgz": "pkg:npm/[email protected]",
- "http://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz": "pkg:npm/[email protected]",
+ "http://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz": "pkg:npm/[email protected]",
"https://registry.npmjs.org/@invisionag/eslint-config-ivx/-/eslint-config-ivx-0.0.2.tgz": "pkg:npm/%40invisionag/[email protected]",
- "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz": "pkg:npm/[email protected]",
- "https://registry.npmjs.org/q/-/q-1.5.1.tgz": "pkg:npm/[email protected]",
- "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz": "pkg:npm/[email protected]",
- "https://registry.npmjs.org/wide-align/-/wide-align-1.1.2.tgz": "pkg:npm/[email protected]",
- "https://registry.npmjs.org/widest-line/-/widest-line-2.0.0.tgz": "pkg:npm/[email protected]",
- "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-2.3.0.tgz": "pkg:npm/[email protected]",
- "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-3.0.0.tgz": "pkg:npm/[email protected]",
- "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz": "pkg:npm/[email protected]",
+ "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz": "pkg:npm/[email protected]",
+ "https://registry.npmjs.org/q/-/q-1.5.1.tgz": "pkg:npm/[email protected]",
+ "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz": "pkg:npm/[email protected]",
+ "https://registry.npmjs.org/wide-align/-/wide-align-1.1.2.tgz": "pkg:npm/[email protected]",
+ "https://registry.npmjs.org/widest-line/-/widest-line-2.0.0.tgz": "pkg:npm/[email protected]",
+ "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-2.3.0.tgz": "pkg:npm/[email protected]",
+ "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-3.0.0.tgz": "pkg:npm/[email protected]",
+ "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz": "pkg:npm/[email protected]",
"http://rubygems.org/downloads/": null,
"http://rubygems.org/downloads/macaddr-1.6.1": null,
@@ -87,13 +87,13 @@
"http://rubygems.org/downloads/open4-1.3.0.gem": "pkg:rubygems/[email protected]",
"https://rubygems.org/downloads/actionmailer-4.0.3.gem": "pkg:rubygems/[email protected]",
"https://rubygems.org/downloads/activerecord-deprecated_finders-1.0.3.gem": "pkg:rubygems/[email protected]",
- "https://rubygems.org/downloads/ejs-1.1.1.gem": "pkg:rubygems/[email protected]",
- "https://rubygems.org/downloads/eventmachine-0.12.11.cloudfoundry.3.gem": "pkg:rubygems/[email protected]",
- "https://rubygems.org/downloads/ffi-1.9.3.gem": "pkg:rubygems/[email protected]",
- "https://rubygems.org/downloads/jwt-0.1.8.gem": "pkg:rubygems/[email protected]",
- "https://rubygems.org/downloads/ref-1.0.5.gem": "pkg:rubygems/[email protected]",
- "https://rubygems.org/downloads/talentbox-delayed_job_sequel-4.0.0.gem": "pkg:rubygems/[email protected]",
- "https://rubygems.org/downloads/unf-0.1.3.gem": "pkg:rubygems/[email protected]",
+ "https://rubygems.org/downloads/ejs-1.1.1.gem": "pkg:rubygems/[email protected]",
+ "https://rubygems.org/downloads/eventmachine-0.12.11.cloudfoundry.3.gem": "pkg:rubygems/[email protected]",
+ "https://rubygems.org/downloads/ffi-1.9.3.gem": "pkg:rubygems/[email protected]",
+ "https://rubygems.org/downloads/jwt-0.1.8.gem": "pkg:rubygems/[email protected]",
+ "https://rubygems.org/downloads/ref-1.0.5.gem": "pkg:rubygems/[email protected]",
+ "https://rubygems.org/downloads/talentbox-delayed_job_sequel-4.0.0.gem": "pkg:rubygems/[email protected]",
+ "https://rubygems.org/downloads/unf-0.1.3.gem": "pkg:rubygems/[email protected]",
"https://rubygems.org/downloads/yajl-ruby-1.2.0.gem": "pkg:rubygems/[email protected]",
"https://pypi.python.org/packages/source/z/zc.recipe.egg/zc.recipe.egg-2.0.0.tar.gz": "pkg:pypi/[email protected]",
@@ -155,10 +155,16 @@
"https://raw.githubusercontent.com/volatilityfoundation/dwarf2json/master/LICENSE.txt": "pkg:github/volatilityfoundation/dwarf2json@master#LICENSE.txt",
"https://api.github.com/repos/nexB/scancode-toolkit": "pkg:github/nexb/scancode-toolkit",
"https://api.github.com/repos/nexB/scancode-toolkit/commits/40593af0df6c8378d2b180324b97cb439fa11d66": "pkg:github/nexb/scancode-toolkit@40593af0df6c8378d2b180324b97cb439fa11d66",
+ "https://codeload.github.com/nexB/scancode-toolkit/tar.gz/3.1.1": "pkg:github/nexb/[email protected]",
"https://codeload.github.com/nexB/scancode-toolkit/tar.gz/v3.1.1": "pkg:github/nexb/[email protected]",
+ "https://codeload.github.com/nexB/scancode-toolkit/zip/3.1.1": "pkg:github/nexb/[email protected]",
"https://codeload.github.com/nexB/scancode-toolkit/zip/v3.1.1": "pkg:github/nexb/[email protected]",
+ "https://codeload.github.com/nexB/scancode.io/tar.gz/1.0": "pkg:github/nexb/[email protected]",
"https://codeload.github.com/nexB/scancode.io/tar.gz/v1.0": "pkg:github/nexb/[email protected]",
+ "https://github.com/nexB/scancode-toolkit/archive/3.1.1.zip": "pkg:github/nexb/[email protected]",
"https://github.com/nexB/scancode-toolkit/archive/v3.1.1.zip": "pkg:github/nexb/[email protected]",
+ "https://github.com/pypa/get-virtualenv/raw/20.0.31/public/virtualenv.pyz": "pkg:github/pypa/[email protected]#public/virtualenv.pyz",
+ "https://github.com/pypa/get-virtualenv/raw/v20.0.31/public/virtualenv.pyz": "pkg:github/pypa/[email protected]#public/virtualenv.pyz",
"https://bitbucket.org/TG1999/first_repo/src/qa/": "pkg:bitbucket/tg1999/first_repo@qa",
"https://bitbucket.org/TG1999/first_repo/src/QA/": "pkg:bitbucket/tg1999/first_repo@QA",
|
Incorrect PURL inferred from download URL
With https://github.com/pypa/get-virtualenv/raw/20.0.31/public/virtualenv.pyz this purl is inferred:
`pkg:github/pypa/get-virtualenv@raw#20.0.31/public/virtualenv.pyz` and this is not correct.
It should be instead:
`pkg:github/pypa/[email protected]#/public/virtualenv.pyz`
```Python
>>> from packageurl import url2purl
>>> url2purl.get_purl('https://github.com/pypa/get-virtualenv/raw/20.0.31/public/virtualenv.pyz')
PackageURL(type='github', namespace='pypa', name='get-virtualenv', version='raw', qualifiers=OrderedDict(), subpath='20.0.31/public/virtualenv.pyz')
```
|
0.0
|
46116009b6815fc7c31bf4319bdb7d18f79fa066
|
[
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_codeload_github_com_nexb_scancode_io_tar_gz_1_0",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_codeload_github_com_nexb_scancode_toolkit_tar_gz_3_1_1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_codeload_github_com_nexb_scancode_toolkit_zip_3_1_1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_nexb_scancode_toolkit_archive_3_1_1_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_pypa_get_virtualenv_raw_20_0_31_public_virtualenv_pyz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_pypa_get_virtualenv_raw_v20_0_31_public_virtualenv_pyz"
] |
[
"tests/contrib/test_get_path_segments.py::test_parsing_with_quoted_uri",
"tests/contrib/test_get_path_segments.py::test_parsing_empty_string",
"tests/contrib/test_get_path_segments.py::test_parsing_with_one_segment",
"tests/contrib/test_purl2url.py::test_purl2url_with_valid_purls",
"tests/contrib/test_purl2url.py::test_convert_with_invalid_purls",
"tests/contrib/test_url2purl.py::TestURL2PURL::test_get_purl_empty_string",
"tests/contrib/test_url2purl.py::TestURL2PURL::test_get_purl_none",
"tests/contrib/test_url2purl.py::TestURL2PURL::test_get_purl_unroutable_uri",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_ant_ant_1_5_ant_1_5_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_ant_ant_optional_1_5_3_1_ant_optional_1_5_3_1_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_ant_contrib_ant_contrib_1_0b3_ant_contrib_1_0b3_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_antlr_antlr_2_7_7_antlr_2_7_7_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_aopalliance_aopalliance_1_0_aopalliance_1_0_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_com_amazonaws_aws_java_sdk_1_8_5_aws_java_sdk_1_8_5_jar_asc",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_com_github_jnr_jffi_1_2_10_jffi_1_2_10_native_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_com_sun_jersey_jersey_archive_1_19_jersey_archive_1_19_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_commons_codec_commons_codec_1_6_commons_codec_1_6_javadoc_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_commons_codec_commons_codec_1_6_commons_codec_1_6_tests_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_commons_io_commons_io_2_3_commons_io_2_3_test_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_fr_opensagres_xdocreport_fr_opensagres_xdocreport_converter_docx_xwpf_1_0_5_fr_opensagres_xdocreport_converter_docx_xwpf_1_0_5_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_javax_activation_activation_1_1_activation_1_1_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_net_sf_json_lib_json_lib_2_3_json_lib_2_3_jdk15_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_axis2_mex_1_6_2_mex_1_6_2_mar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_commons_commons_math3_3_6_1_commons_math3_3_6_1_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_geronimo_gshell_gshell_assembly_1_0_alpha_1_gshell_assembly_1_0_alpha_1_full_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_geronimo_specs_geronimo_servlet_3_0_spec_1_0_geronimo_servlet_3_0_spec_1_0_source_release_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_kafka_kafka_2_11_0_10_1_0_kafka_2_11_0_10_1_0_scaladoc_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_yoko_yoko_1_0_yoko_1_0_pom",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_jar_asc",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_jar_asc_md5",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_jar_asc_sha1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_jar_md5",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_jar_sha1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_javadoc_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_apache_zookeeper_zookeeper_3_4_6_zookeeper_3_4_6_tests_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_drools_drools_guvnor_5_1_0_drools_guvnor_5_1_0_war",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_eclipse_jetty_jetty_distribution_9_4_11_v20180605_jetty_distribution_9_4_11_v20180605_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_eclipse_jetty_orbit_org_apache_jasper_glassfish_2_2_2_v201112011158_org_apache_jasper_glassfish_2_2_2_v201112011158_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_eclipse_jetty_orbit_org_apache_taglibs_standard_glassfish_1_2_0_v201112081803_org_apache_taglibs_standard_glassfish_1_2_0_v201112081803_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_jasypt_jasypt_1_9_0_jasypt_1_9_0_lite_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_jmxtrans_jmxtrans_251_jmxtrans_251_rpm",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_mongodb_casbah_commons_2_10_2_6_1_casbah_commons_2_10_2_6_1_test_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_springframework_security_kerberos_spring_security_kerberos_core_1_0_1_release_spring_security_kerberos_core_1_0_1_release_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_org_springframework_security_kerberos_spring_security_kerberos_web_1_0_1_release_spring_security_kerberos_web_1_0_1_release_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_servicemix_servicemix_1_0_servicemix_1_0_src_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_xmlunit_xmlunit_1_1_xmlunit_1_1_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_xom_xom_1_0_xom_1_0_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_xom_xom_1_1_xom_1_1_sources_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_xpp3_xpp3_1_1_3_4_o_xpp3_1_1_3_4_o_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_central_maven_org_maven2_xpp3_xpp3_min_1_1_4c_xpp3_min_1_1_4c_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_iweb_dl_sourceforge_net_project_findbugs_findbugs_1_3_4_findbugs_1_3_4_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_iweb_dl_sourceforge_net_project_sblim_sblim_cim_client2_2_2_5_sblim_cim_client2_2_2_5_src_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_a2freedom_a2_1_2_a2freedom_1_2_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_aloyscore_aloyscore_0_1a1_20stable_0_1a1_stable_aloyscore_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_arestc_net_sf_arestc_arestc_0_1_4_arestc_0_1_4_javadoc_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_intraperson_oldfiles_intraperson_0_28_intraperson_0_28_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_iswraid_iswraid_0_1_4_3_2_4_28_pre3_iswraid_patch_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_libpng_zlib_1_2_3_zlib_1_2_3_tar_bz2",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_myenterprise_oldfiles_1_0_0_2_myenterprise_source_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_pwiki_pwiki_0_1_2_0_1_2_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_tinyos_oldfiles_tinyos_1_1_0_tinyos_1_1_0_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_urlchecker_lu_ng_urlchecker_urlchecker_1_7_urlchecker_1_7_javadoc_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_wxhaskell_wxhaskell_wxhaskell_0_9_wxhaskell_src_0_9_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_wxmozilla_wxmozilla_0_5_5_wxmozilla_0_5_5_exe",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_xmlstar_xmlstarlet_1_0_0_xmlstarlet_1_0_0_1_src_rpm",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_zapping_zvbi_0_2_35_zvbi_0_2_35_tar_bz2",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_zclasspath_maven2_org_zclasspath_zclasspath_1_5_zclasspath_1_5_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_zinnia_zinnia_win32_0_06_zinnia_win32_0_06_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_master_dl_sourceforge_net_project_zznotes_zznotes_1_1_2_zznotes_1_1_2_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_nuget_org_packages_entityframework_4_2_0_0",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_1to2_1to2_1_0_0_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_abbrev_abbrev_1_0_9_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_accepts_accepts_1_2_2_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_acorn_acorn_0_11_0_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_co_co_4_6_0_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_d_d_0_1_1_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_functional_red_black_tree_functional_red_black_tree_1_0_1_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_json_stable_stringify_without_jsonify_json_stable_stringify_without_jsonify_1_0_1_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_ms_ms_0_7_1_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_registry_npmjs_org_validate_npm_package_license_validate_npm_package_license_3_0_1_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_repo1_maven_org_maven2",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_repo1_maven_org_maven2_ant_contrib_ant_contrib_1_0b3_ant_contrib_1_0b3_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_repo1_maven_org_maven2_jdbm_jdbm",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_repo1_maven_org_maven2_jdbm_jdbm_020_dev_jdbm_020_dev",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_repo1_maven_org_maven2_jdbm_jdbm_0_20_dev",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_repo1_maven_org_maven2_jdbm_jdbm_0_20_dev_jdbm_0_20_dev_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_repo1_maven_org_maven2_jdbm_jdbm_0_20_dev_jdbm_0_20_dev_pom",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_repo1_maven_org_maven2_org_apache_commons_commons_math3_3_6_1_commons_math3_3_6_1_jar",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_rubygems_org_downloads",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_rubygems_org_downloads_macaddr_1_6_1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_rubygems_org_downloads_macaddr_1_6_1_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_rubygems_org_downloads_open4_1_3_0_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_www_nuget_org_api_v2_package_entityframework_6_1_0",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_http_www_nuget_org_packages_sharpgis_gzipwebclient_1_2_0",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_api_github_com_repos_nexb_scancode_toolkit",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_api_github_com_repos_nexb_scancode_toolkit_commits_40593af0df6c8378d2b180324b97cb439fa11d66",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_api_nuget_org_v3_flatcontainer_newtonsoft_json_10_0_1_newtonsoft_json_10_0_1_nupkg",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_bitbucket_org_tg1999",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_bitbucket_org_tg1999_first_repo",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_bitbucket_org_tg1999_first_repo_new_folder",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_bitbucket_org_tg1999_first_repo_src",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_bitbucket_org_tg1999_first_repo_src_master_new_folder",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_bitbucket_org_tg1999_first_repo_src_qa",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_codeload_github_com_nexb_scancode_io_tar_gz_v1_0",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_codeload_github_com_nexb_scancode_toolkit_tar_gz_v3_1_1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_codeload_github_com_nexb_scancode_toolkit_zip_v3_1_1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_crates_io_api_v1_crates_clap_2_33_0_download",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_crates_io_api_v1_crates_rand_0_7_2_download",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_crates_io_api_v1_crates_structopt_0_3_11_download",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_files_pythonhosted_org_packages_7f_cf_12d4611fc67babd4ae250c9e8249c5650ae1933395488e9e7e3562b4ff24_amqp_2_3_2_py2_py3_none_any_whl_sha256_eed41946890cd43e8dee44a316b85cf6fee5a1a34bb4a562b660a358eb529e1b",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_files_pythonhosted_org_packages_87_44_0fa8e9d0cccb8eb86fc1b5170208229dc6d6e9fd6e57ea1fe19cbeea68f5_aboutcode_toolkit_3_4_0rc1_py2_py3_none_any_whl",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_nexb_scancode_toolkit_archive_v3_1_1_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_nexb_scancode_toolkit_tree_develop_plugins_scancode_ctags_macosx_10_9_intel",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_package_url_packageurl_js",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_package_url_packageurl_js_tree_master",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_package_url_packageurl_js_tree_master_test_data",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_tg1999",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_tg1999_fetchcode_fetchcode_src",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_tg1999_fetchcode_master",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_tg1999_fetchcode_tree",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_github_com_tg1999_fetchcode_tree_documentation_fetchcode",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_gitlab_com_tg1999_firebase",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_gitlab_com_tg1999_firebase_master",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_gitlab_com_tg1999_firebase_tree",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_gitlab_com_tg1999_firebase_tree_1a122122_views",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_gitlab_com_tg1999_firebase_tree_master",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_hackage_haskell_org_package_3d_graphics_examples_0_0_0_2_3d_graphics_examples_0_0_0_2_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_hackage_haskell_org_package_a50_0_5_a50_0_5_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_hackage_haskell_org_package_ac_halfinteger_1_2_1_ac_halfinteger_1_2_1_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_1e_75_8005d086cac4cc41d3b320d338972c5e5c6a21f88472f21ac9d0e031d300_pyahocorasick_1_1_4_tar_bz2_md5_ad445b6648dc06e9040705ce1ccb4384",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_34_c1_8806f99713ddb993c5366c362b2f908f18269f8d792aff1abfd700775a77_click_6_7_py2_py3_none_any_whl_md5_5e7a4e296b3212da2ff11017675d7a4d",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_38_e2_b23434f4030bbb1af3bcdbb2ecff6b11cf2e467622446ce66a08e99f2ea9_pluggy_0_4_0_zip_md5_447a92368175965d2fbacaef9f3df842",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_bd_e8_ea44ba5357a0b4fd16e5fb60c355fc8722eae31b93d7597eec50f7c35a52_pycryptodome_3_4_7_cp27_cp27m_win_amd64_whl_md5_f20bb847322baf7ae24700e5cbb15e07",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_f6_ae_bbc6a204f33d9d57c798fb3857a072cd14b836792244eea4b446fdb674c6_pycryptodome_3_4_7_cp27_cp27m_win32_whl_md5_78b341de1cd686077745cd9e3a93d8d3",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_py2_py3_w_wheel_bad_wheel_name_any_whl",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_py2_py3_w_wheel_wheel_0_29_0_py2_py3_none_any_whl",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_py2_py3_w_wheel_wheel_0_29_0_py2_py3_none_any_whl_md5_d7db45db5c131af262b8ffccde46a88a",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_source_p_python_openid_python_openid_2_2_5_zip",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_pypi_python_org_packages_source_z_zc_recipe_egg_zc_recipe_egg_2_0_0_tar_gz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_raw_githubusercontent_com_volatilityfoundation_dwarf2json_master_license_txt",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_automatta_automatta_0_0_1_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_fast_json_stable_stringify_fast_json_stable_stringify_2_0_0_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_invisionag_eslint_config_ivx_eslint_config_ivx_0_0_2_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_q_q_1_5_1_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_remove_trailing_separator_remove_trailing_separator_1_1_0_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_wide_align_wide_align_1_1_2_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_widest_line_widest_line_2_0_0_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_write_file_atomic_write_file_atomic_2_3_0_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_xdg_basedir_xdg_basedir_3_0_0_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_npmjs_org_yallist_yallist_2_1_2_tgz",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_yarnpkg_com_invisionag",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_yarnpkg_com_invisionag_2feslint_config_ivx",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_registry_yarnpkg_com_invisionag_eslint_config_ivx",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_actionmailer_4_0_3_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_activerecord_deprecated_finders_1_0_3_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_ejs_1_1_1_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_eventmachine_0_12_11_cloudfoundry_3_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_ffi_1_9_3_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_jwt_0_1_8_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_ref_1_0_5_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_talentbox_delayed_job_sequel_4_0_0_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_unf_0_1_3_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_rubygems_org_downloads_yajl_ruby_1_2_0_gem",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_www_nuget_org_api_v2_package_mvvmlightlibs_4_1_23",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_www_nuget_org_api_v2_package_newtonsoft_json_11_0_1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_https_www_nuget_org_api_v2_package_twilio_3_4_1",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_maven_index_repo1_maven_org_ant_contrib_ant_contrib_1_0b3",
"tests/contrib/test_url2purl.py::TestURL2PURLDataDriven::test_url2purl_maven_index_repo1_maven_org_ant_contrib_ant_contrib_1_0b3_ant_contrib_1_0b3_jar",
"tests/test_packageurl.py::test_packageurl.python_safe_name",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_basic_valid_maven_purl_without_version",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_bitbucket_namespace_and_name_should_be_lowercased",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_debian_can_use_qualifiers",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_docker_uses_qualifiers_and_hash_image_id_as_versions",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_double_slash_after_scheme_is_not_significant",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_github_namespace_and_name_should_be_lowercased",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_is_invalid_a_name_is_required",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_is_invalid_a_scheme_is_always_required",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_is_invalid_a_type_is_always_required",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_is_invalid_checks_for_invalid_qualifier_keys",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_java_gem_can_use_a_qualifier",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_maven_can_come_with_a_type_qualifier",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_maven_often_uses_qualifiers",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_maven_pom_reference",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_npm_can_be_scoped",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_nuget_names_are_case_sensitive",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_pypi_names_have_special_rules_and_not_case_sensitive",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_rpm_often_use_qualifiers",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_slash_after_scheme_is_not_significant",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_slash_after_type_is_not_significant",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_go_purl_with_version_and_subpath",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_go_purl_without_version_and_with_subpath",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_maven_purl",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_maven_purl_containing_a_space_in_the_version_and_qualifier",
"tests/test_packageurl.py::PurlTest::test_purl_pkg_valid_maven_purl_with_case_sensitive_namespace_and_name",
"tests/test_packageurl.py::NormalizePurlTest::test_create_PackageURL_from_qualifiers_dict",
"tests/test_packageurl.py::NormalizePurlTest::test_create_PackageURL_from_qualifiers_string",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_decode_can_take_unicode_with_non_ascii_with_slash",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_encode_always_reencodes",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_encode_can_take_unicode_with_non_ascii_with_slash",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_qualifiers_as_dict",
"tests/test_packageurl.py::NormalizePurlTest::test_normalize_qualifiers_as_string",
"tests/test_packageurl.py::NormalizePurlTest::test_qualifiers_must_be_key_value_pairs",
"tests/test_packageurl.py::NormalizePurlTest::test_to_dict_custom_empty_value",
"tests/test_packageurl.py::NormalizePurlTest::test_to_dict_optionally_returns_qualifiers_as_string",
"tests/test_packageurl.py::test_purl_is_hashable"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-14 12:28:26+00:00
|
apache-2.0
| 4,438 |
|
pact-foundation__pact-python-35
|
diff --git a/pact/verify.py b/pact/verify.py
index ce33b2c..5add1fb 100644
--- a/pact/verify.py
+++ b/pact/verify.py
@@ -74,8 +74,8 @@ def main(base_url, pact_urls, states_url, states_setup_url, username,
'--pact-urls': ','.join(pact_urls),
'--provider-states-url': states_url,
'--provider-states-setup-url': states_setup_url,
- '--pact-broker-username': username,
- '--pact-broker-password': password
+ '--broker-username': username,
+ '--broker-password': password
}
command = [VERIFIER_PATH] + [
|
pact-foundation/pact-python
|
41fa3dfe3804b2c247593da396c5d58d78252e15
|
diff --git a/pact/test/test_verify.py b/pact/test/test_verify.py
index b30c513..e540f15 100644
--- a/pact/test/test_verify.py
+++ b/pact/test/test_verify.py
@@ -113,7 +113,7 @@ class mainTestCase(TestCase):
self.mock_Popen.return_value.returncode = 0
result = self.runner.invoke(verify.main, self.default_opts)
self.assertEqual(result.exit_code, 0)
- self.assertProcess(*self.default_call + ['--pact-broker-password=pwd'])
+ self.assertProcess(*self.default_call + ['--broker-password=pwd'])
self.mock_Popen.return_value.communicate.assert_called_once_with(
timeout=30)
@@ -135,8 +135,8 @@ class mainTestCase(TestCase):
'--pact-urls=./pacts/consumer-provider.json',
'--provider-states-url=http=//localhost/provider-states',
'--provider-states-setup-url=http://localhost/provider-states/set',
- '--pact-broker-username=user',
- '--pact-broker-password=pass')
+ '--broker-username=user',
+ '--broker-password=pass')
self.mock_Popen.return_value.communicate.assert_called_once_with(
timeout=60)
|
pact-verifier always results in an error when using pact broker with authentication
When I run:
```
pact-verifier --provider-base-url http://localhost:7599 --pact-urls https://my.pact.broker/pacts/provider/my-provider/consumer/my-consumer/latest --pact-broker-username username --pact-broker-password password
```
I always get:
```
ERROR: "pact-provider-verifier.rb verify" was called with arguments ["--pact-broker-username=username", "--pact-broker-password=password"]
Usage: "pact-provider-verifier.rb verify -h, --provider-base-url=PROVIDER_BASE_URL -u, --pact-urls=PACT_URLS"
```
|
0.0
|
41fa3dfe3804b2c247593da396c5d58d78252e15
|
[
"pact/test/test_verify.py::mainTestCase::test_all_options",
"pact/test/test_verify.py::mainTestCase::test_password_from_env_var"
] |
[
"pact/test/test_verify.py::mainTestCase::test_local_pact_urls_must_exist",
"pact/test/test_verify.py::mainTestCase::test_verification_timeout",
"pact/test/test_verify.py::mainTestCase::test_failed_verification",
"pact/test/test_verify.py::mainTestCase::test_successful_verification",
"pact/test/test_verify.py::mainTestCase::test_pact_urls_are_required",
"pact/test/test_verify.py::mainTestCase::test_provider_base_url_is_required",
"pact/test/test_verify.py::mainTestCase::test_must_provide_both_provide_states_options",
"pact/test/test_verify.py::path_existsTestCase::test_https",
"pact/test/test_verify.py::path_existsTestCase::test_file_does_not_exist",
"pact/test/test_verify.py::path_existsTestCase::test_file_does_exist",
"pact/test/test_verify.py::path_existsTestCase::test_http"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-06-27 02:16:36+00:00
|
mit
| 4,439 |
|
pact-foundation__pact-python-42
|
diff --git a/pact/verify.py b/pact/verify.py
index b9fd4cd..2af08f5 100644
--- a/pact/verify.py
+++ b/pact/verify.py
@@ -32,7 +32,8 @@ else:
multiple=True) # Remove in major version 1.0.0
@click.option(
'states_url', '--provider-states-url',
- help='URL to fetch the provider states for the given provider API.')
+ help='DEPRECATED: URL to fetch the provider states for'
+ ' the given provider API.') # Remove in major version 1.0.0
@click.option(
'states_setup_url', '--provider-states-setup-url',
help='URL to send PUT requests to setup a given provider state.')
@@ -61,13 +62,6 @@ def main(base_url, pact_url, pact_urls, states_url, states_setup_url, username,
""" # NOQA
error = click.style('Error:', fg='red')
warning = click.style('Warning:', fg='yellow')
- if bool(states_url) != bool(states_setup_url):
- click.echo(
- error
- + ' To use provider states you must provide both'
- ' --provider-states-url and --provider-states-setup-url.')
- raise click.Abort()
-
all_pact_urls = list(pact_url)
for urls in pact_urls: # Remove in major version 1.0.0
all_pact_urls.extend(p for p in urls.split(',') if p)
@@ -96,7 +90,6 @@ def main(base_url, pact_url, pact_urls, states_url, states_setup_url, username,
options = {
'--provider-base-url': base_url,
'--pact-urls': ','.join(all_pact_urls),
- '--provider-states-url': states_url,
'--provider-states-setup-url': states_setup_url,
'--broker-username': username,
'--broker-password': password
diff --git a/setup.py b/setup.py
index 6249850..1b355a8 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@ from setuptools.command.install import install
IS_64 = sys.maxsize > 2 ** 32
-PACT_STANDALONE_VERSION = '1.0.0'
+PACT_STANDALONE_VERSION = '1.1.1'
here = os.path.abspath(os.path.dirname(__file__))
|
pact-foundation/pact-python
|
04107db7f5ec2270fb0c3eb40e8a2c63dbe96050
|
diff --git a/pact/test/test_verify.py b/pact/test/test_verify.py
index 8f29345..06a12d3 100644
--- a/pact/test/test_verify.py
+++ b/pact/test/test_verify.py
@@ -74,17 +74,6 @@ class mainTestCase(TestCase):
self.assertIn(b'./pacts/consumer-provider.json', result.output_bytes)
self.assertFalse(self.mock_Popen.called)
- def test_must_provide_both_provide_states_options(self):
- result = self.runner.invoke(verify.main, [
- '--provider-base-url=http://localhost',
- '--pact-urls=./pacts/consumer-provider.json',
- '--provider-states-url=http://localhost/provider-state'
- ])
- self.assertEqual(result.exit_code, 1)
- self.assertIn(b'--provider-states-url', result.output_bytes)
- self.assertIn(b'--provider-states-setup-url', result.output_bytes)
- self.assertFalse(self.mock_Popen.called)
-
def test_verification_timeout(self):
self.mock_Popen.return_value.communicate.side_effect = TimeoutExpired(
[], 30)
@@ -129,7 +118,6 @@ class mainTestCase(TestCase):
'./pacts/consumer-provider2.json',
'--pact-url=./pacts/consumer-provider3.json',
'--pact-url=./pacts/consumer-provider4.json',
- '--provider-states-url=http=//localhost/provider-states',
'--provider-states-setup-url=http://localhost/provider-states/set',
'--pact-broker-username=user',
'--pact-broker-password=pass',
@@ -142,7 +130,6 @@ class mainTestCase(TestCase):
'--pact-urls=./pacts/consumer-provider3.json,'
'./pacts/consumer-provider4.json,'
'./pacts/consumer-provider.json,./pacts/consumer-provider2.json',
- '--provider-states-url=http=//localhost/provider-states',
'--provider-states-setup-url=http://localhost/provider-states/set',
'--broker-username=user',
'--broker-password=pass')
|
--provider-states-url will be deprecated in a matter of days
I have updated the underlying code so this is no longer needed. I'll ping you when the new package is out.
|
0.0
|
04107db7f5ec2270fb0c3eb40e8a2c63dbe96050
|
[
"pact/test/test_verify.py::mainTestCase::test_all_options"
] |
[
"pact/test/test_verify.py::mainTestCase::test_local_pact_urls_must_exist",
"pact/test/test_verify.py::mainTestCase::test_deprecated_pact_urls",
"pact/test/test_verify.py::mainTestCase::test_pact_urls_are_required",
"pact/test/test_verify.py::mainTestCase::test_provider_base_url_is_required",
"pact/test/test_verify.py::mainTestCase::test_password_from_env_var",
"pact/test/test_verify.py::mainTestCase::test_verification_timeout",
"pact/test/test_verify.py::mainTestCase::test_successful_verification",
"pact/test/test_verify.py::mainTestCase::test_failed_verification",
"pact/test/test_verify.py::path_existsTestCase::test_file_does_not_exist",
"pact/test/test_verify.py::path_existsTestCase::test_https",
"pact/test/test_verify.py::path_existsTestCase::test_http",
"pact/test/test_verify.py::path_existsTestCase::test_file_does_exist"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-07-29 16:54:06+00:00
|
mit
| 4,440 |
|
pact-foundation__pact-python-49
|
diff --git a/README.md b/README.md
index 1a494ec..434230f 100644
--- a/README.md
+++ b/README.md
@@ -156,24 +156,25 @@ as `generate`, in this case `2016-12-15T20:16:01`. When the contract is verified
provider, the regex will be used to search the response from the real provider service
and the test will be considered successful if the regex finds a match in the response.
-### SomethingLike(matcher)
+### Like(matcher)
Asserts the element's type matches the matcher. For example:
```python
-from pact import SomethingLike
-SomethingLike(123) # Matches if the value is an integer
-SomethingLike('hello world') # Matches if the value is a string
-SomethingLike(3.14) # Matches if the value is a float
+from pact import Like
+Like(123) # Matches if the value is an integer
+Like('hello world') # Matches if the value is a string
+Like(3.14) # Matches if the value is a float
```
-The argument supplied to `SomethingLike` will be what the mock service responds with.
+The argument supplied to `Like` will be what the mock service responds with.
-When a dictionary is used as an argument for SomethingLike, all the child objects (and their child objects etc.) will be matched according to their types, unless you use a more specific matcher like a Term.
+When a dictionary is used as an argument for Like, all the child objects (and their child objects etc.) will be matched according to their types, unless you use a more specific matcher like a Term.
```python
-SomethingLike({
+from pact import Like, Term
+Like({
'username': Term('[a-zA-Z]+', 'username'),
'id': 123, # integer
- 'confirmed': false, # boolean
+ 'confirmed': False, # boolean
'address': { # dictionary
'street': '200 Bourke St' # string
}
@@ -194,7 +195,7 @@ EachLike('hello') # All items are strings
Or other matchers can be nested inside to assert more complex objects:
```python
-from pact import EachLike, SomethingLike, Term
+from pact import EachLike, Term
EachLike({
'username': Term('[a-zA-Z]+', 'username'),
'id': 123,
diff --git a/pact/__init__.py b/pact/__init__.py
index 37fe01b..31fc709 100644
--- a/pact/__init__.py
+++ b/pact/__init__.py
@@ -1,8 +1,9 @@
"""Python methods for interactive with a Pact Mock Service."""
from .consumer import Consumer
-from .matchers import EachLike, SomethingLike, Term
+from .matchers import EachLike, Like, SomethingLike, Term
from .pact import Pact
from .provider import Provider
from .__version__ import __version__ # noqa: F401
-__all__ = ('Consumer', 'EachLike', 'Pact', 'Provider', 'SomethingLike', 'Term')
+__all__ = ('Consumer', 'EachLike', 'Like', 'Pact', 'Provider', 'SomethingLike',
+ 'Term')
diff --git a/pact/matchers.py b/pact/matchers.py
index 77d8d2e..76810c2 100644
--- a/pact/matchers.py
+++ b/pact/matchers.py
@@ -66,7 +66,7 @@ class EachLike(Matcher):
'min': self.minimum}
-class SomethingLike(Matcher):
+class Like(Matcher):
"""
Expect the type of the value to be the same as matcher.
@@ -79,7 +79,7 @@ class SomethingLike(Matcher):
... .upon_receiving('a request for a random number')
... .with_request('get', '/generate-number')
... .will_respond_with(200, body={
- ... 'number': SomethingLike(1111222233334444)
+ ... 'number': Like(1111222233334444)
... }))
Would expect the response body to be a JSON object, containing the key
@@ -120,6 +120,10 @@ class SomethingLike(Matcher):
'contents': from_term(self.matcher)}
+# Remove SomethingLike in major version 1.0.0
+SomethingLike = Like
+
+
class Term(Matcher):
"""
Expect the response to match a specified regular expression.
|
pact-foundation/pact-python
|
a07c8b6b230e16e89e8493eec3c6a506e6a67166
|
diff --git a/pact/test/test_matchers.py b/pact/test/test_matchers.py
index 4d3b1ee..666991d 100644
--- a/pact/test/test_matchers.py
+++ b/pact/test/test_matchers.py
@@ -1,6 +1,6 @@
from unittest import TestCase
-from ..matchers import EachLike, Matcher, SomethingLike, Term, from_term
+from ..matchers import EachLike, Like, Matcher, SomethingLike, Term, from_term
class MatcherTestCase(TestCase):
@@ -56,6 +56,11 @@ class EachLikeTestCase(TestCase):
'min': 1})
+class LikeTestCase(TestCase):
+ def test_is_something_like(self):
+ self.assertIs(SomethingLike, Like)
+
+
class SomethingLikeTestCase(TestCase):
def test_valid_types(self):
types = [None, list(), dict(), 1, 1.0, 'string', u'unicode', Matcher()]
|
Suggestion: rename SomethingLike to just Like
When the SomethingLike was invented, it was a bit of a hack to help out with some brittle tests we had at REA, and I never thought it would become the standard tool it has become! In the other implementations, we're using just the name "Like" rather than "SomethingLike". It's a bit pithier. Can I suggest that you do the same for python?
|
0.0
|
a07c8b6b230e16e89e8493eec3c6a506e6a67166
|
[
"pact/test/test_matchers.py::MatcherTestCase::test_generate",
"pact/test/test_matchers.py::EachLikeTestCase::test_default_options",
"pact/test/test_matchers.py::EachLikeTestCase::test_minimum",
"pact/test/test_matchers.py::EachLikeTestCase::test_minimum_assertion_error",
"pact/test/test_matchers.py::EachLikeTestCase::test_nested_matchers",
"pact/test/test_matchers.py::LikeTestCase::test_is_something_like",
"pact/test/test_matchers.py::SomethingLikeTestCase::test_basic_type",
"pact/test/test_matchers.py::SomethingLikeTestCase::test_complex_type",
"pact/test/test_matchers.py::SomethingLikeTestCase::test_invalid_types",
"pact/test/test_matchers.py::SomethingLikeTestCase::test_valid_types",
"pact/test/test_matchers.py::TermTestCase::test_regex",
"pact/test/test_matchers.py::FromTermTestCase::test_dict",
"pact/test/test_matchers.py::FromTermTestCase::test_each_like",
"pact/test/test_matchers.py::FromTermTestCase::test_float",
"pact/test/test_matchers.py::FromTermTestCase::test_int",
"pact/test/test_matchers.py::FromTermTestCase::test_list",
"pact/test/test_matchers.py::FromTermTestCase::test_nested",
"pact/test/test_matchers.py::FromTermTestCase::test_none",
"pact/test/test_matchers.py::FromTermTestCase::test_something_like",
"pact/test/test_matchers.py::FromTermTestCase::test_term",
"pact/test/test_matchers.py::FromTermTestCase::test_unicode",
"pact/test/test_matchers.py::FromTermTestCase::test_unknown_type"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-09-04 20:50:48+00:00
|
mit
| 4,441 |
|
pact-foundation__pact-python-94
|
diff --git a/pact/pact.py b/pact/pact.py
index e855274..45326de 100644
--- a/pact/pact.py
+++ b/pact/pact.py
@@ -38,6 +38,8 @@ class Pact(object):
HEADERS = {'X-Pact-Mock-Service': 'true'}
+ MANDATORY_FIELDS = {'response', 'description', 'request'}
+
def __init__(self, consumer, provider, host_name='localhost', port=1234,
log_dir=None, ssl=False, sslcert=None, sslkey=None,
cors=False, pact_dir=None, version='2.0.0',
@@ -114,7 +116,8 @@ class Pact(object):
:type provider_state: basestring
:rtype: Pact
"""
- self._interactions.insert(0, {'provider_state': provider_state})
+ self._insert_interaction_if_complete()
+ self._interactions[0]['provider_state'] = provider_state
return self
def setup(self):
@@ -190,6 +193,7 @@ class Pact(object):
:type scenario: basestring
:rtype: Pact
"""
+ self._insert_interaction_if_complete()
self._interactions[0]['description'] = scenario
return self
@@ -231,6 +235,7 @@ class Pact(object):
:type query: dict, basestring, or None
:rtype: Pact
"""
+ self._insert_interaction_if_complete()
self._interactions[0]['request'] = Request(
method, path, body=body, headers=headers, query=query).json()
return self
@@ -248,11 +253,27 @@ class Pact(object):
:type body: Matcher, dict, list, basestring, or None
:rtype: Pact
"""
+ self._insert_interaction_if_complete()
self._interactions[0]['response'] = Response(status,
headers=headers,
body=body).json()
return self
+ def _insert_interaction_if_complete(self):
+ """
+ Insert a new interaction if current interaction is complete.
+
+ An interaction is complete if it has all the mandatory fields.
+ If there are no interactions, a new interaction will be added.
+
+ :rtype: None
+ """
+ if not self._interactions:
+ self._interactions.append({})
+ elif all(field in self._interactions[0]
+ for field in self.MANDATORY_FIELDS):
+ self._interactions.insert(0, {})
+
def _wait_for_server_start(self):
"""
Wait for the mock service to be ready for requests.
diff --git a/setup.py b/setup.py
index 4d7c021..e5806be 100644
--- a/setup.py
+++ b/setup.py
@@ -109,7 +109,7 @@ def read(filename):
dependencies = [
- 'click>=2.0.0,<=6.7',
+ 'click>=7.0.0',
'psutil>=2.0.0',
'requests>=2.5.0',
'six>=1.9.0',
|
pact-foundation/pact-python
|
68e792a5ca4820ee1d97b35565f875fa43bd4dcc
|
diff --git a/pact/test/test_pact.py b/pact/test/test_pact.py
index 2cf5539..64c061f 100644
--- a/pact/test/test_pact.py
+++ b/pact/test/test_pact.py
@@ -76,6 +76,27 @@ class PactTestCase(TestCase):
self.assertEqual(target._interactions[0]['response'],
{'status': 200, 'body': 'success'})
+ def test_definition_without_given(self):
+ target = Pact(self.consumer, self.provider)
+ (target
+ .upon_receiving('a specific request to the server')
+ .with_request('GET', '/path')
+ .will_respond_with(200, body='success'))
+
+ self.assertEqual(len(target._interactions), 1)
+
+ self.assertIsNone(
+ target._interactions[0].get('provider_state'))
+
+ self.assertEqual(
+ target._interactions[0]['description'],
+ 'a specific request to the server')
+
+ self.assertEqual(target._interactions[0]['request'],
+ {'path': '/path', 'method': 'GET'})
+ self.assertEqual(target._interactions[0]['response'],
+ {'status': 200, 'body': 'success'})
+
def test_definition_all_options(self):
target = Pact(self.consumer, self.provider)
(target
@@ -146,6 +167,40 @@ class PactTestCase(TestCase):
self.assertEqual(target._interactions[0]['response'],
{'status': 200, 'body': 'success'})
+ def test_definition_multiple_interactions_without_given(self):
+ target = Pact(self.consumer, self.provider)
+ (target
+ .upon_receiving('a specific request to the server')
+ .with_request('GET', '/foo')
+ .will_respond_with(200, body='success')
+ .upon_receiving('a different request to the server')
+ .with_request('GET', '/bar')
+ .will_respond_with(200, body='success'))
+
+ self.assertEqual(len(target._interactions), 2)
+
+ self.assertIsNone(
+ target._interactions[1].get('provider_state'))
+ self.assertIsNone(
+ target._interactions[0].get('provider_state'))
+
+ self.assertEqual(
+ target._interactions[1]['description'],
+ 'a specific request to the server')
+ self.assertEqual(
+ target._interactions[0]['description'],
+ 'a different request to the server')
+
+ self.assertEqual(target._interactions[1]['request'],
+ {'path': '/foo', 'method': 'GET'})
+ self.assertEqual(target._interactions[0]['request'],
+ {'path': '/bar', 'method': 'GET'})
+
+ self.assertEqual(target._interactions[1]['response'],
+ {'status': 200, 'body': 'success'})
+ self.assertEqual(target._interactions[0]['response'],
+ {'status': 200, 'body': 'success'})
+
class PactSetupTestCase(PactTestCase):
def setUp(self):
diff --git a/pact/test/test_verify.py b/pact/test/test_verify.py
index f0b77a7..cdd1aad 100644
--- a/pact/test/test_verify.py
+++ b/pact/test/test_verify.py
@@ -69,7 +69,7 @@ class mainTestCase(TestCase):
def test_provider_base_url_is_required(self):
result = self.runner.invoke(verify.main, [])
self.assertEqual(result.exit_code, 2)
- self.assertIn(b'--provider-base-url', result.output_bytes)
+ self.assertIn(b'--provider-base-url', result.stdout_bytes)
self.assertFalse(self.mock_Popen.called)
def test_pact_urls_are_required(self):
@@ -77,14 +77,14 @@ class mainTestCase(TestCase):
verify.main, ['--provider-base-url=http://localhost'])
self.assertEqual(result.exit_code, 1)
- self.assertIn(b'at least one', result.output_bytes)
+ self.assertIn(b'at least one', result.stdout_bytes)
self.assertFalse(self.mock_Popen.called)
def test_local_pact_urls_must_exist(self):
self.mock_isfile.return_value = False
result = self.runner.invoke(verify.main, self.default_opts)
self.assertEqual(result.exit_code, 1)
- self.assertIn(b'./pacts/consumer-provider.json', result.output_bytes)
+ self.assertIn(b'./pacts/consumer-provider.json', result.stdout_bytes)
self.assertFalse(self.mock_Popen.called)
def test_failed_verification(self):
@@ -150,7 +150,7 @@ class mainTestCase(TestCase):
self.assertEqual(result.exit_code, 0)
self.assertIn(
b'Multiple --pact-urls arguments are deprecated.',
- result.output_bytes)
+ result.stdout_bytes)
self.mock_Popen.return_value.wait.assert_called_once_with()
self.assertEqual(self.mock_Popen.call_count, 1)
self.assertProcess(
@@ -166,7 +166,7 @@ class mainTestCase(TestCase):
])
self.assertEqual(result.exit_code, 1)
self.assertIn(
- b'Provider application version is required', result.output_bytes)
+ b'Provider application version is required', result.stdout_bytes)
self.assertFalse(self.mock_Popen.return_value.communicate.called)
|
`given`/provider state should not be necessary
Only `Pact.given()` inserts a new element into the `_interactions` array, meaning that if you don't call it for the *first* test, you'll get an exception when calling any other interaction-related methods, and if you don't call it for *subsequent* tests, you'll overwrite the previous interaction (possibly partially, leading to nonsense).
According to [the JSON schema](https://bitbucket.org/atlassian/pact-json-schema/src/8d0b9d2f38fe517dfbce0e5d0b8755eaefaacb5b/schemas/v2/schema.json?at=master&fileviewer=file-view-default#schema.json-16:30), `providerState` is an optional field, which makes sense, since not every interaction *will* require state to be set up.
As is, `pact-python` requires a bogus `given()` call for stateless interactions (and therefore requires the provider to handle that bogus state-setting).
(Less seriously, but still I think inelegantly, this also means that `given` has to be called *first*, even though the mock-English encouraged by the method names and chaining would suggest that there's no difference between "given …, upon receiving …, will respond with …" and "will respond with …, upon receiving …, given …"
|
0.0
|
68e792a5ca4820ee1d97b35565f875fa43bd4dcc
|
[
"pact/test/test_pact.py::PactTestCase::test_definition_without_given",
"pact/test/test_pact.py::PactTestCase::test_definition_multiple_interactions_without_given",
"pact/test/test_pact.py::PactContextManagerTestCase::test_definition_without_given",
"pact/test/test_pact.py::PactContextManagerTestCase::test_definition_multiple_interactions_without_given",
"pact/test/test_pact.py::PactVerifyTestCase::test_definition_without_given",
"pact/test/test_pact.py::PactVerifyTestCase::test_definition_multiple_interactions_without_given",
"pact/test/test_pact.py::PactSetupTestCase::test_definition_multiple_interactions_without_given",
"pact/test/test_pact.py::PactSetupTestCase::test_definition_without_given"
] |
[
"pact/test/test_pact.py::PactTestCase::test_definition_multiple_interactions",
"pact/test/test_pact.py::PactTestCase::test_definition_all_options",
"pact/test/test_pact.py::PactTestCase::test_init_custom_mock_service",
"pact/test/test_pact.py::PactTestCase::test_definition_sparse",
"pact/test/test_pact.py::PactTestCase::test_init_defaults",
"pact/test/test_pact.py::FromTermsTestCase::test_json",
"pact/test/test_pact.py::PactContextManagerTestCase::test_init_custom_mock_service",
"pact/test/test_pact.py::PactContextManagerTestCase::test_definition_sparse",
"pact/test/test_pact.py::PactContextManagerTestCase::test_context_raises_error",
"pact/test/test_pact.py::PactContextManagerTestCase::test_definition_multiple_interactions",
"pact/test/test_pact.py::PactContextManagerTestCase::test_init_defaults",
"pact/test/test_pact.py::PactContextManagerTestCase::test_definition_all_options",
"pact/test/test_pact.py::PactContextManagerTestCase::test_successful",
"pact/test/test_pact.py::PactWaitForServerStartTestCase::test_wait_for_server_start_success",
"pact/test/test_pact.py::PactWaitForServerStartTestCase::test_wait_for_server_start_failure",
"pact/test/test_pact.py::ResponseTestCase::test_all_options",
"pact/test/test_pact.py::ResponseTestCase::test_falsey_body",
"pact/test/test_pact.py::ResponseTestCase::test_sparse",
"pact/test/test_pact.py::RequestTestCase::test_sparse",
"pact/test/test_pact.py::RequestTestCase::test_all_options",
"pact/test/test_pact.py::RequestTestCase::test_matcher_in_path_gets_converted",
"pact/test/test_pact.py::RequestTestCase::test_falsey_body",
"pact/test/test_pact.py::PactStartShutdownServerTestCase::test_stop_windows",
"pact/test/test_pact.py::PactStartShutdownServerTestCase::test_stop_fails_posix",
"pact/test/test_pact.py::PactStartShutdownServerTestCase::test_stop_posix",
"pact/test/test_pact.py::PactStartShutdownServerTestCase::test_stop_fails_windows",
"pact/test/test_pact.py::PactStartShutdownServerTestCase::test_start_no_ssl",
"pact/test/test_pact.py::PactStartShutdownServerTestCase::test_start_fails",
"pact/test/test_pact.py::PactStartShutdownServerTestCase::test_start_with_ssl",
"pact/test/test_pact.py::PactVerifyTestCase::test_init_custom_mock_service",
"pact/test/test_pact.py::PactVerifyTestCase::test_definition_all_options",
"pact/test/test_pact.py::PactVerifyTestCase::test_definition_sparse",
"pact/test/test_pact.py::PactVerifyTestCase::test_definition_multiple_interactions",
"pact/test/test_pact.py::PactVerifyTestCase::test_init_defaults",
"pact/test/test_pact.py::PactSetupTestCase::test_init_custom_mock_service",
"pact/test/test_pact.py::PactSetupTestCase::test_definition_all_options",
"pact/test/test_pact.py::PactSetupTestCase::test_error_posting_interactions",
"pact/test/test_pact.py::PactSetupTestCase::test_successful",
"pact/test/test_pact.py::PactSetupTestCase::test_definition_sparse",
"pact/test/test_pact.py::PactSetupTestCase::test_init_defaults",
"pact/test/test_pact.py::PactSetupTestCase::test_error_deleting_interactions",
"pact/test/test_pact.py::PactSetupTestCase::test_definition_multiple_interactions",
"pact/test/test_verify.py::path_existsTestCase::test_file_does_exist",
"pact/test/test_verify.py::path_existsTestCase::test_https",
"pact/test/test_verify.py::path_existsTestCase::test_http",
"pact/test/test_verify.py::path_existsTestCase::test_file_does_not_exist",
"pact/test/test_verify.py::rerun_commandTestCase::test_posix",
"pact/test/test_verify.py::rerun_commandTestCase::test_windows",
"pact/test/test_verify.py::mainTestCase::test_all_options",
"pact/test/test_verify.py::mainTestCase::test_successful_verification",
"pact/test/test_verify.py::mainTestCase::test_failed_verification",
"pact/test/test_verify.py::mainTestCase::test_password_from_env_var",
"pact/test/test_verify.py::sanitize_logsTestCase::test_terse",
"pact/test/test_verify.py::sanitize_logsTestCase::test_verbose",
"pact/test/test_verify.py::expand_directoriesTestCase::test_file",
"pact/test/test_verify.py::expand_directoriesTestCase::test_windows_directories",
"pact/test/test_verify.py::expand_directoriesTestCase::test_uri",
"pact/test/test_verify.py::expand_directoriesTestCase::test_directory"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-20 03:15:04+00:00
|
mit
| 4,442 |
|
palantir__python-jsonrpc-server-37
|
diff --git a/pyls_jsonrpc/endpoint.py b/pyls_jsonrpc/endpoint.py
index e8bfb5b..0caf612 100644
--- a/pyls_jsonrpc/endpoint.py
+++ b/pyls_jsonrpc/endpoint.py
@@ -236,6 +236,7 @@ class Endpoint(object):
if error is not None:
log.debug("Received error response to message %s: %s", msg_id, error)
request_future.set_exception(JsonRpcException.from_dict(error))
+ return
log.debug("Received result for message %s: %s", msg_id, result)
request_future.set_result(result)
|
palantir/python-jsonrpc-server
|
12d1f5125a103852fc8a2048508227ff13b91210
|
diff --git a/test/test_endpoint.py b/test/test_endpoint.py
index 47a038b..b954732 100644
--- a/test/test_endpoint.py
+++ b/test/test_endpoint.py
@@ -115,9 +115,9 @@ def test_request_cancel(endpoint, consumer):
'params': {'id': MSG_ID}
})
- with pytest.raises(exceptions.JsonRpcException) as exc_info:
+ with pytest.raises((exceptions.JsonRpcException, futures.CancelledError)) as exc_info:
assert future.result(timeout=2)
- assert exc_info.type == exceptions.JsonRpcRequestCancelled
+ assert exc_info.type in (exceptions.JsonRpcRequestCancelled, futures.CancelledError)
def test_consume_notification(endpoint, dispatcher):
diff --git a/test/test_streams.py b/test/test_streams.py
index 8c2e93e..480a73b 100644
--- a/test/test_streams.py
+++ b/test/test_streams.py
@@ -97,7 +97,8 @@ def test_writer(wfile, writer):
def test_writer_bad_message(wfile, writer):
# A datetime isn't serializable(or poorly serializable),
- # ensure the write method doesn't throw
+ # ensure the write method doesn't throw, but the result could be empty
+ # or the correct datetime
import datetime
writer.write(datetime.datetime(
year=2019,
@@ -108,12 +109,10 @@ def test_writer_bad_message(wfile, writer):
second=1,
))
- if os.name == 'nt':
- assert wfile.getvalue() == b''
- else:
- assert wfile.getvalue() == (
- b'Content-Length: 10\r\n'
- b'Content-Type: application/vscode-jsonrpc; charset=utf8\r\n'
- b'\r\n'
- b'1546304461'
- )
+ assert wfile.getvalue() in [
+ b'',
+ b'Content-Length: 10\r\n'
+ b'Content-Type: application/vscode-jsonrpc; charset=utf8\r\n'
+ b'\r\n'
+ b'1546304461'
+ ]
|
Tests failing with Python 3.8
```
[ 11s] =================================== FAILURES ===================================
[ 11s] ______________________________ test_request_error ______________________________
[ 11s]
[ 11s] endpoint = <pyls_jsonrpc.endpoint.Endpoint object at 0x7f8e1ab39ee0>
[ 11s] consumer = <MagicMock id='140248310062672'>
[ 11s]
[ 11s] def test_request_error(endpoint, consumer):
[ 11s] future = endpoint.request('methodName', {'key': 'value'})
[ 11s] assert not future.done()
[ 11s]
[ 11s] consumer.assert_called_once_with({
[ 11s] 'jsonrpc': '2.0',
[ 11s] 'id': MSG_ID,
[ 11s] 'method': 'methodName',
[ 11s] 'params': {'key': 'value'}
[ 11s] })
[ 11s]
[ 11s] # Send an error back from the client
[ 11s] error = exceptions.JsonRpcInvalidRequest(data=1234)
[ 11s] > endpoint.consume({
[ 11s] 'jsonrpc': '2.0',
[ 11s] 'id': MSG_ID,
[ 11s] 'error': error.to_dict()
[ 11s] })
[ 11s]
[ 11s] test/test_endpoint.py:86:
[ 11s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 11s] pyls_jsonrpc/endpoint.py:109: in consume
[ 11s] self._handle_response(message['id'], message.get('result'), message.get('error'))
[ 11s] pyls_jsonrpc/endpoint.py:241: in _handle_response
[ 11s] request_future.set_result(result)
[ 11s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 11s]
[ 11s] self = <Future at 0x7f8e1ab3fb20 state=finished raised JsonRpcInvalidRequest>
[ 11s] result = None
[ 11s]
[ 11s] def set_result(self, result):
[ 11s] """Sets the return value of work associated with the future.
[ 11s]
[ 11s] Should only be used by Executor implementations and unit tests.
[ 11s] """
[ 11s] with self._condition:
[ 11s] if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}:
[ 11s] > raise InvalidStateError('{}: {!r}'.format(self._state, self))
[ 11s] E concurrent.futures._base.InvalidStateError: FINISHED: <Future at 0x7f8e1ab3fb20 state=finished raised JsonRpcInvalidRequest>
[ 11s]
[ 11s] /usr/lib64/python3.8/concurrent/futures/_base.py:524: InvalidStateError
[ 11s] _____________________________ test_request_cancel ______________________________
[ 11s]
[ 11s] endpoint = <pyls_jsonrpc.endpoint.Endpoint object at 0x7f8e1ad6f670>
[ 11s] consumer = <MagicMock id='140248312379232'>
[ 11s]
[ 11s] def test_request_cancel(endpoint, consumer):
[ 11s] future = endpoint.request('methodName', {'key': 'value'})
[ 11s] assert not future.done()
[ 11s]
[ 11s] consumer.assert_called_once_with({
[ 11s] 'jsonrpc': '2.0',
[ 11s] 'id': MSG_ID,
[ 11s] 'method': 'methodName',
[ 11s] 'params': {'key': 'value'}
[ 11s] })
[ 11s]
[ 11s] # Cancel the request
[ 11s] future.cancel()
[ 11s] consumer.assert_any_call({
[ 11s] 'jsonrpc': '2.0',
[ 11s] 'method': '$/cancelRequest',
[ 11s] 'params': {'id': MSG_ID}
[ 11s] })
[ 11s]
[ 11s] with pytest.raises(exceptions.JsonRpcException) as exc_info:
[ 11s] > assert future.result(timeout=2)
[ 11s]
[ 11s] test/test_endpoint.py:119:
[ 11s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 11s]
[ 11s] self = <Future at 0x7f8e1ab372b0 state=cancelled>, timeout = 2
[ 11s]
[ 11s] def result(self, timeout=None):
[ 11s] """Return the result of the call that the future represents.
[ 11s]
[ 11s] Args:
[ 11s] timeout: The number of seconds to wait for the result if the future
[ 11s] isn't done. If None, then there is no limit on the wait time.
[ 11s]
[ 11s] Returns:
[ 11s] The result of the call that the future represents.
[ 11s]
[ 11s] Raises:
[ 11s] CancelledError: If the future was cancelled.
[ 11s] TimeoutError: If the future didn't finish executing before the given
[ 11s] timeout.
[ 11s] Exception: If the call raised then that exception will be raised.
[ 11s] """
[ 11s] with self._condition:
[ 11s] if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
[ 11s] > raise CancelledError()
[ 11s] E concurrent.futures._base.CancelledError
[ 11s]
[ 11s] /usr/lib64/python3.8/concurrent/futures/_base.py:430: CancelledError
[ 11s] ------------------------------ Captured log call -------------------------------
[ 11s] ERROR concurrent.futures:_base.py:330 exception calling callback for <Future at 0x7f8e1ab372b0 state=cancelled>
[ 11s] Traceback (most recent call last):
[ 11s] File "/usr/lib64/python3.8/concurrent/futures/_base.py", line 328, in _invoke_callbacks
[ 11s] callback(self)
[ 11s] File "/home/abuild/rpmbuild/BUILD/python-jsonrpc-server-0.3.4/pyls_jsonrpc/endpoint.py", line 91, in callback
[ 11s] future.set_exception(JsonRpcRequestCancelled())
[ 11s] File "/usr/lib64/python3.8/concurrent/futures/_base.py", line 539, in set_exception
[ 11s] raise InvalidStateError('{}: {!r}'.format(self._state, self))
[ 11s] concurrent.futures._base.InvalidStateError: CANCELLED: <Future at 0x7f8e1ab372b0 state=cancelled>
[ 11s] ___________________________ test_writer_bad_message ____________________________
[ 11s]
[ 11s] wfile = <_io.BytesIO object at 0x7f8e1ab2d5e0>
[ 11s] writer = <pyls_jsonrpc.streams.JsonRpcStreamWriter object at 0x7f8e1aab2580>
[ 11s]
[ 11s] def test_writer_bad_message(wfile, writer):
[ 11s] # A datetime isn't serializable(or poorly serializable),
[ 11s] # ensure the write method doesn't throw
[ 11s] import datetime
[ 11s] writer.write(datetime.datetime(
[ 11s] year=2019,
[ 11s] month=1,
[ 11s] day=1,
[ 11s] hour=1,
[ 11s] minute=1,
[ 11s] second=1,
[ 11s] ))
[ 11s]
[ 11s] if os.name == 'nt':
[ 11s] assert wfile.getvalue() == b''
[ 11s] else:
[ 11s] > assert wfile.getvalue() == (
[ 11s] b'Content-Length: 10\r\n'
[ 11s] b'Content-Type: application/vscode-jsonrpc; charset=utf8\r\n'
[ 11s] b'\r\n'
[ 11s] b'1546304461'
[ 11s] )
[ 11s] E AssertionError: assert b'' == b'Content-Len...r\n1546304461'
[ 11s] E Full diff:
[ 11s] E - b''
[ 11s] E + (
[ 11s] E + b'Content-Length: 10\r\nContent-Type: application/vscode-jsonrpc; charset=ut'
[ 11s] E + b'f8\r\n\r\n1546304461',
[ 11s] E + )
[ 11s]
[ 11s] test/test_streams.py:114: AssertionError
[ 11s] ------------------------------ Captured log call -------------------------------
[ 11s] ERROR pyls_jsonrpc.streams:streams.py:112 Failed to write message to output file 2019-01-01 01:01:01
[ 11s] Traceback (most recent call last):
[ 11s] File "/home/abuild/rpmbuild/BUILD/python-jsonrpc-server-0.3.4/pyls_jsonrpc/streams.py", line 98, in write
[ 11s] body = json.dumps(message, **self._json_dumps_args)
[ 11s] TypeError: � is not JSON serializable
[ 11s] =============================== warnings summary ===============================
[ 11s] test/test_endpoint.py::test_bad_message
[ 11s] /home/abuild/rpmbuild/BUILD/python-jsonrpc-server-0.3.4/pyls_jsonrpc/endpoint.py:101: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
[ 11s] log.warn("Unknown message type %s", message)
[ 11s]
[ 11s] test/test_endpoint.py::test_consume_notification_method_not_found
[ 11s] /home/abuild/rpmbuild/BUILD/python-jsonrpc-server-0.3.4/pyls_jsonrpc/endpoint.py:138: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
[ 11s] log.warn("Ignoring notification for unknown method %s", method)
[ 11s]
[ 11s] test/test_endpoint.py::test_consume_request_cancel_unknown
[ 11s] /home/abuild/rpmbuild/BUILD/python-jsonrpc-server-0.3.4/pyls_jsonrpc/endpoint.py:168: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
[ 11s] log.warn("Received cancel notification for unknown message id %s", msg_id)
[ 11s]
[ 11s] -- Docs: https://docs.pytest.org/en/latest/warnings.html
[ 11s] =================== 3 failed, 24 passed, 3 warnings in 0.20s ===================
```
[Full build log with all details](https://github.com/palantir/python-jsonrpc-server/files/4328630/_log.txt)
|
0.0
|
12d1f5125a103852fc8a2048508227ff13b91210
|
[
"test/test_endpoint.py::test_request_error"
] |
[
"test/test_endpoint.py::test_bad_message",
"test/test_endpoint.py::test_notify",
"test/test_endpoint.py::test_notify_none_params",
"test/test_endpoint.py::test_request",
"test/test_endpoint.py::test_request_cancel",
"test/test_endpoint.py::test_consume_notification",
"test/test_endpoint.py::test_consume_notification_error",
"test/test_endpoint.py::test_consume_notification_method_not_found",
"test/test_endpoint.py::test_consume_async_notification_error",
"test/test_endpoint.py::test_consume_request",
"test/test_endpoint.py::test_consume_future_request",
"test/test_endpoint.py::test_consume_async_request",
"test/test_endpoint.py::test_consume_async_request_error[ValueError-error0]",
"test/test_endpoint.py::test_consume_async_request_error[KeyError-error1]",
"test/test_endpoint.py::test_consume_async_request_error[JsonRpcMethodNotFound-error2]",
"test/test_endpoint.py::test_consume_request_method_not_found",
"test/test_endpoint.py::test_consume_request_error[ValueError-error0]",
"test/test_endpoint.py::test_consume_request_error[KeyError-error1]",
"test/test_endpoint.py::test_consume_request_error[JsonRpcMethodNotFound-error2]",
"test/test_endpoint.py::test_consume_request_cancel",
"test/test_endpoint.py::test_consume_request_cancel_unknown",
"test/test_streams.py::test_reader",
"test/test_streams.py::test_reader_bad_message",
"test/test_streams.py::test_reader_bad_json",
"test/test_streams.py::test_writer",
"test/test_streams.py::test_writer_bad_message"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-01 20:01:17+00:00
|
mit
| 4,443 |
|
palantir__python-language-server-107
|
diff --git a/pyls/_utils.py b/pyls/_utils.py
new file mode 100644
index 0000000..b185454
--- /dev/null
+++ b/pyls/_utils.py
@@ -0,0 +1,16 @@
+# Copyright 2017 Palantir Technologies, Inc.
+import functools
+import threading
+
+
+def debounce(interval_s):
+ """Debounce calls to this function until interval_s seconds have passed."""
+ def wrapper(func):
+ @functools.wraps(func)
+ def debounced(*args, **kwargs):
+ if hasattr(debounced, '_timer'):
+ debounced._timer.cancel()
+ debounced._timer = threading.Timer(interval_s, func, args, kwargs)
+ debounced._timer.start()
+ return debounced
+ return wrapper
diff --git a/pyls/python_ls.py b/pyls/python_ls.py
index 2f27bc3..2d8f3f4 100644
--- a/pyls/python_ls.py
+++ b/pyls/python_ls.py
@@ -1,10 +1,13 @@
+# Copyright 2017 Palantir Technologies, Inc.
import logging
-from . import config, lsp, plugins
+from . import config, lsp, plugins, _utils
from .language_server import LanguageServer
from .workspace import Workspace
log = logging.getLogger(__name__)
+LINT_DEBOUNCE_S = 0.5 # 500 ms
+
class PythonLanguageServer(LanguageServer):
@@ -87,6 +90,7 @@ class PythonLanguageServer(LanguageServer):
def hover(self, doc_uri, position):
return self._hook(self._hooks.pyls_hover, doc_uri, position=position) or {'contents': ''}
+ @_utils.debounce(LINT_DEBOUNCE_S)
def lint(self, doc_uri):
self.workspace.publish_diagnostics(doc_uri, flatten(self._hook(
self._hooks.pyls_lint, doc_uri
|
palantir/python-language-server
|
0a5a8de44bda1f457ee37be811444c11237d098c
|
diff --git a/test/test_language_server.py b/test/test_language_server.py
index 7f0de1b..408fccc 100644
--- a/test/test_language_server.py
+++ b/test/test_language_server.py
@@ -85,7 +85,7 @@ def test_linting(client_server):
assert 'capabilities' in response['result']
# didOpen
- client.call('textDocument/didOpen', {
+ client.notify('textDocument/didOpen', {
'textDocument': {'uri': 'file:///test', 'text': 'import sys'}
})
response = _get_notification(client)
diff --git a/test/test_utils.py b/test/test_utils.py
new file mode 100644
index 0000000..911519c
--- /dev/null
+++ b/test/test_utils.py
@@ -0,0 +1,26 @@
+# Copyright 2017 Palantir Technologies, Inc.
+import time
+from pyls import _utils
+
+
+def test_debounce():
+ interval = 0.1
+
+ @_utils.debounce(0.1)
+ def call_m():
+ call_m._count += 1
+
+ call_m._count = 0
+
+ call_m()
+ call_m()
+ call_m()
+ assert call_m._count == 0
+
+ time.sleep(interval * 2)
+ call_m()
+ assert call_m._count == 1
+
+ time.sleep(interval * 2)
+ call_m()
+ assert call_m._count == 2
|
`lint` called too frequently resulting in spurious warnings
The really annoying one is `W291` trailing whitespace. I wouldn't want to disable this warning because it's actually useful (although you can highlight trailing whitespace anyway), but since the `lint` command runs on every change, then every time you enter a space, the warning appears. In `nvim` this means my signcolumn / gutter is constantly appearing and re-appearing, which makes the text area shift left and right a column.
I think the problem could be remedied with more logic [here](https://github.com/palantir/python-language-server/blob/develop/pyls/python_ls.py#L112#L119) instead of calling `self.lint` on every change. Maybe every line would be a better frequency?
I would be happy to implement this at some point.
|
0.0
|
0a5a8de44bda1f457ee37be811444c11237d098c
|
[
"test/test_language_server.py::test_initialize",
"test/test_language_server.py::test_file_closed",
"test/test_language_server.py::test_missing_message",
"test/test_language_server.py::test_linting",
"test/test_utils.py::test_debounce"
] |
[] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-08-29 19:00:41+00:00
|
mit
| 4,444 |
|
palantir__python-language-server-118
|
diff --git a/pyls/config.py b/pyls/config.py
index 595caab..311401b 100644
--- a/pyls/config.py
+++ b/pyls/config.py
@@ -15,14 +15,25 @@ class Config(object):
self._root_uri = root_uri
self._init_opts = init_opts
+ self._disabled_plugins = []
+ self._settings = {}
+
self._pm = pluggy.PluginManager(PYLS)
self._pm.trace.root.setwriter(log.debug)
self._pm.enable_tracing()
self._pm.add_hookspecs(hookspecs)
self._pm.load_setuptools_entrypoints(PYLS)
+
for name, plugin in self._pm.list_name_plugin():
log.info("Loaded pyls plugin %s from %s", name, plugin)
+ for plugin_conf in self._pm.hook.pyls_settings(config=self):
+ self.update(plugin_conf)
+
+ @property
+ def disabled_plugins(self):
+ return self._disabled_plugins
+
@property
def plugin_manager(self):
return self._pm
@@ -39,6 +50,18 @@ class Config(object):
root_path = uris.to_fs_path(self._root_uri)
return find_parents(root_path, path, names)
+ def update(self, settings):
+ """Recursively merge the given settings into the current settings."""
+ self._settings = _merge_dicts(self._settings, settings)
+ log.info("Updated settings to %s", self._settings)
+
+ # All plugins default to enabled
+ self._disabled_plugins = [
+ plugin for name, plugin in self.plugin_manager.list_name_plugin()
+ if not self._settings.get('plugins', {}).get(name, {}).get('enabled', True)
+ ]
+ log.info("Disabled plugins: %s", self._disabled_plugins)
+
def build_config(key, config_files):
"""Parse configuration from the given files for the given key."""
@@ -91,3 +114,19 @@ def find_parents(root, path, names):
# Otherwise nothing
return []
+
+
+def _merge_dicts(dict_a, dict_b):
+ """Recursively merge dictionary b into dictionary a."""
+ def _merge_dicts_(a, b):
+ for key in set(a.keys()).union(b.keys()):
+ if key in a and key in b:
+ if isinstance(a[key], dict) and isinstance(b[key], dict):
+ yield (key, dict(_merge_dicts_(a[key], b[key])))
+ else:
+ yield (key, b[key])
+ elif key in a:
+ yield (key, a[key])
+ else:
+ yield (key, b[key])
+ return dict(_merge_dicts_(dict_a, dict_b))
diff --git a/pyls/hookspecs.py b/pyls/hookspecs.py
index 14c2e52..dc89e5b 100644
--- a/pyls/hookspecs.py
+++ b/pyls/hookspecs.py
@@ -82,6 +82,11 @@ def pyls_references(config, workspace, document, position, exclude_declaration):
pass
+@hookspec
+def pyls_settings(config):
+ pass
+
+
@hookspec(firstresult=True)
def pyls_signature_help(config, workspace, document, position):
pass
diff --git a/pyls/python_ls.py b/pyls/python_ls.py
index 624e298..ab98c96 100644
--- a/pyls/python_ls.py
+++ b/pyls/python_ls.py
@@ -14,12 +14,9 @@ class PythonLanguageServer(LanguageServer):
workspace = None
config = None
- @property
- def _hooks(self):
- return self.config.plugin_manager.hook
-
- def _hook(self, hook, doc_uri=None, **kwargs):
+ def _hook(self, hook_name, doc_uri=None, **kwargs):
doc = self.workspace.get_document(doc_uri) if doc_uri else None
+ hook = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
return hook(config=self.config, workspace=self.workspace, document=doc, **kwargs)
def capabilities(self):
@@ -37,7 +34,7 @@ class PythonLanguageServer(LanguageServer):
'documentSymbolProvider': True,
'definitionProvider': True,
'executeCommandProvider': {
- 'commands': flatten(self._hook(self._hooks.pyls_commands))
+ 'commands': flatten(self._hook('pyls_commands'))
},
'hoverProvider': True,
'referencesProvider': True,
@@ -50,60 +47,58 @@ class PythonLanguageServer(LanguageServer):
def initialize(self, root_uri, init_opts, _process_id):
self.workspace = Workspace(root_uri, lang_server=self)
self.config = config.Config(root_uri, init_opts)
- self._hook(self._hooks.pyls_initialize)
+ self._hook('pyls_initialize')
def code_actions(self, doc_uri, range, context):
- return flatten(self._hook(self._hooks.pyls_code_actions, doc_uri, range=range, context=context))
+ return flatten(self._hook('pyls_code_actions', doc_uri, range=range, context=context))
def code_lens(self, doc_uri):
- return flatten(self._hook(self._hooks.pyls_code_lens, doc_uri))
+ return flatten(self._hook('pyls_code_lens', doc_uri))
def completions(self, doc_uri, position):
- completions = self._hook(self._hooks.pyls_completions, doc_uri, position=position)
+ completions = self._hook('pyls_completions', doc_uri, position=position)
return {
'isIncomplete': False,
'items': flatten(completions)
}
def definitions(self, doc_uri, position):
- return flatten(self._hook(self._hooks.pyls_definitions, doc_uri, position=position))
+ return flatten(self._hook('pyls_definitions', doc_uri, position=position))
def document_symbols(self, doc_uri):
- return flatten(self._hook(self._hooks.pyls_document_symbols, doc_uri))
+ return flatten(self._hook('pyls_document_symbols', doc_uri))
def execute_command(self, command, arguments):
- return self._hook(self._hooks.pyls_execute_command, command=command, arguments=arguments)
+ return self._hook('pyls_execute_command', command=command, arguments=arguments)
def format_document(self, doc_uri):
- return self._hook(self._hooks.pyls_format_document, doc_uri)
+ return self._hook('pyls_format_document', doc_uri)
def format_range(self, doc_uri, range):
- return self._hook(self._hooks.pyls_format_range, doc_uri, range=range)
+ return self._hook('pyls_format_range', doc_uri, range=range)
def hover(self, doc_uri, position):
- return self._hook(self._hooks.pyls_hover, doc_uri, position=position) or {'contents': ''}
+ return self._hook('pyls_hover', doc_uri, position=position) or {'contents': ''}
@_utils.debounce(LINT_DEBOUNCE_S)
def lint(self, doc_uri):
- self.workspace.publish_diagnostics(doc_uri, flatten(self._hook(
- self._hooks.pyls_lint, doc_uri
- )))
+ self.workspace.publish_diagnostics(doc_uri, flatten(self._hook('pyls_lint', doc_uri)))
def references(self, doc_uri, position, exclude_declaration):
return flatten(self._hook(
- self._hooks.pyls_references, doc_uri, position=position,
+ 'pyls_references', doc_uri, position=position,
exclude_declaration=exclude_declaration
))
def signature_help(self, doc_uri, position):
- return self._hook(self._hooks.pyls_signature_help, doc_uri, position=position)
+ return self._hook('pyls_signature_help', doc_uri, position=position)
def m_text_document__did_close(self, textDocument=None, **_kwargs):
self.workspace.rm_document(textDocument['uri'])
def m_text_document__did_open(self, textDocument=None, **_kwargs):
self.workspace.put_document(textDocument['uri'], textDocument['text'], version=textDocument.get('version'))
- self._hook(self._hooks.pyls_document_did_open, textDocument['uri'])
+ self._hook('pyls_document_did_open', textDocument['uri'])
self.lint(textDocument['uri'])
def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
@@ -151,8 +146,15 @@ class PythonLanguageServer(LanguageServer):
def m_text_document__signature_help(self, textDocument=None, position=None, **_kwargs):
return self.signature_help(textDocument['uri'], position)
+ def m_workspace__did_change_configuration(self, settings=None):
+ self.config.update((settings or {}).get('pyls'))
+ for doc_uri in self.workspace.documents:
+ self.lint(doc_uri)
+
def m_workspace__did_change_watched_files(self, **_kwargs):
- pass
+ # Externally changed files may result in changed diagnostics
+ for doc_uri in self.workspace.documents:
+ self.lint(doc_uri)
def m_workspace__execute_command(self, command=None, arguments=None):
return self.execute_command(command, arguments)
diff --git a/pyls/workspace.py b/pyls/workspace.py
index 4ce913c..caf4124 100644
--- a/pyls/workspace.py
+++ b/pyls/workspace.py
@@ -29,6 +29,10 @@ class Workspace(object):
self._docs = {}
self._lang_server = lang_server
+ @property
+ def documents(self):
+ return self._docs
+
@property
def root_path(self):
return self._root_path
diff --git a/vscode-client/package.json b/vscode-client/package.json
index 53f1677..9cad08f 100644
--- a/vscode-client/package.json
+++ b/vscode-client/package.json
@@ -15,6 +15,18 @@
"activationEvents": [
"*"
],
+ "contributes": {
+ "configuration": {
+ "title": "Python Language Server Configuration",
+ "type": "object",
+ "properties": {
+ "pyls.plugins": {
+ "type": "object",
+ "description": "Configuration for pyls plugins. Configuration key is the pluggy plugin name."
+ }
+ }
+ }
+ },
"main": "./out/extension",
"scripts": {
"vscode:prepublish": "tsc -p ./",
diff --git a/vscode-client/src/extension.ts b/vscode-client/src/extension.ts
index dd0fa37..40682fb 100644
--- a/vscode-client/src/extension.ts
+++ b/vscode-client/src/extension.ts
@@ -9,12 +9,16 @@ import * as net from 'net';
import { workspace, Disposable, ExtensionContext } from 'vscode';
import { LanguageClient, LanguageClientOptions, SettingMonitor, ServerOptions, ErrorAction, ErrorHandler, CloseAction, TransportKind } from 'vscode-languageclient';
-function startLangServer(command: string, documentSelector: string[]): Disposable {
+function startLangServer(command: string, args: string[], documentSelector: string[]): Disposable {
const serverOptions: ServerOptions = {
- command: command,
+ command,
+ args,
};
const clientOptions: LanguageClientOptions = {
documentSelector: documentSelector,
+ synchronize: {
+ configurationSection: "pyls"
+ }
}
return new LanguageClient(command, serverOptions, clientOptions).start();
}
@@ -39,7 +43,7 @@ function startLangServerTCP(addr: number, documentSelector: string[]): Disposabl
}
export function activate(context: ExtensionContext) {
- context.subscriptions.push(startLangServer("pyls", ["python"]));
+ context.subscriptions.push(startLangServer("pyls", ["-v"], ["python"]));
// For TCP
// context.subscriptions.push(startLangServerTCP(2087, ["python"]));
}
|
palantir/python-language-server
|
04e02613920d807725588d26397116ac21e1fccb
|
diff --git a/test/test_config.py b/test/test_config.py
index fe5510d..52beb28 100644
--- a/test/test_config.py
+++ b/test/test_config.py
@@ -1,5 +1,5 @@
# Copyright 2017 Palantir Technologies, Inc.
-from pyls.config import find_parents
+from pyls.config import find_parents, _merge_dicts
def test_find_parents(tmpdir):
@@ -8,3 +8,10 @@ def test_find_parents(tmpdir):
test_cfg = tmpdir.ensure("test.cfg")
assert find_parents(tmpdir.strpath, path.strpath, ["test.cfg"]) == [test_cfg.strpath]
+
+
+def test_merge_dicts():
+ assert _merge_dicts(
+ {'a': True, 'b': {'x': 123, 'y': {'hello': 'world'}}},
+ {'a': False, 'b': {'y': [], 'z': 987}}
+ ) == {'a': False, 'b': {'x': 123, 'y': [], 'z': 987}}
|
Support configuration for enabling/disabling/configuring plugins
As well as enabling/disabling everything, we want to:
* Pass pycodestyle / pyflakes configs
* Choose the style for Yapf (pep8, google, config file)
|
0.0
|
04e02613920d807725588d26397116ac21e1fccb
|
[
"test/test_config.py::test_find_parents",
"test/test_config.py::test_merge_dicts"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-09-03 18:18:19+00:00
|
mit
| 4,445 |
|
palantir__python-language-server-124
|
diff --git a/pyls/language_server.py b/pyls/language_server.py
index cfd187b..ce11da6 100644
--- a/pyls/language_server.py
+++ b/pyls/language_server.py
@@ -37,7 +37,7 @@ def start_tcp_lang_server(bind_addr, port, handler_class):
log.info("Serving %s on (%s, %s)", handler_class.__name__, bind_addr, port)
server.serve_forever()
except KeyboardInterrupt:
- server.shutdown()
+ server.exit()
finally:
log.info("Shutting down")
server.server_close()
@@ -113,7 +113,7 @@ class LanguageServer(MethodJSONRPCServer):
self.shutdown()
def m_exit(self, **_kwargs):
- self.shutdown()
+ self.exit()
_RE_FIRST_CAP = re.compile('(.)([A-Z][a-z]+)')
diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py
index a0ee0c7..668096d 100644
--- a/pyls/plugins/pycodestyle_lint.py
+++ b/pyls/plugins/pycodestyle_lint.py
@@ -64,12 +64,6 @@ class PyCodeStyleDiagnosticReport(pycodestyle.BaseReport):
'range': range,
'message': text,
'code': code,
- 'severity': _get_severity(code)
+ # Are style errors really ever errors?
+ 'severity': lsp.DiagnosticSeverity.Warning
})
-
-
-def _get_severity(code):
- if code[0] == 'E':
- return lsp.DiagnosticSeverity.Error
- elif code[0] == 'W':
- return lsp.DiagnosticSeverity.Warning
diff --git a/pyls/server.py b/pyls/server.py
index 9e3fcac..83f8002 100644
--- a/pyls/server.py
+++ b/pyls/server.py
@@ -14,23 +14,36 @@ class JSONRPCServer(object):
def __init__(self, rfile, wfile):
self.rfile = rfile
self.wfile = wfile
+ self._shutdown = False
- def shutdown(self):
- # TODO: we should handle this much better
+ def exit(self):
+ # Exit causes a complete exit of the server
self.rfile.close()
self.wfile.close()
+ def shutdown(self):
+ # Shutdown signals the server to stop, but not exit
+ self._shutdown = True
+ log.debug("Server shut down, awaiting exit notification")
+
def handle(self):
# VSCode wants us to keep the connection open, so let's handle messages in a loop
while True:
try:
data = self._read_message()
log.debug("Got message: %s", data)
+
+ if self._shutdown:
+ # Handle only the exit notification when we're shut down
+ jsonrpc.JSONRPCResponseManager.handle(data, {'exit': self.exit})
+ break
+
response = jsonrpc.JSONRPCResponseManager.handle(data, self)
+
if response is not None:
self._write_message(response.data)
except Exception:
- log.exception("Language server shutting down for uncaught exception")
+ log.exception("Language server exiting due to uncaught exception")
break
def call(self, method, params=None):
|
palantir/python-language-server
|
aa8e1b2c12759470c18910c61f55ab4c6b80d344
|
diff --git a/test/test_language_server.py b/test/test_language_server.py
index 408fccc..e1641df 100644
--- a/test/test_language_server.py
+++ b/test/test_language_server.py
@@ -37,10 +37,10 @@ def client_server():
yield client, server
- try:
- client.call('shutdown')
- except:
- pass
+ client.call('shutdown')
+ response = _get_response(client)
+ assert response['result'] is None
+ client.notify('exit')
def test_initialize(client_server):
@@ -56,13 +56,6 @@ def test_initialize(client_server):
assert 'capabilities' in response['result']
-def test_file_closed(client_server):
- client, server = client_server
- client.rfile.close()
- with pytest.raises(Exception):
- _get_response(client)
-
-
def test_missing_message(client_server):
client, server = client_server
|
Severity of linting erros
I currently really dislike the displayed errors of the linters.
If I use `pycodestyle` and `Pyflakes` I would prefer that `pycodestyle`s errors like `line too long`, `missing whitespace` could be *downgraded* to warnings or info. Obviously `Pyflakes` messages are more severe, but I still would like `pycodestyles` as an information.
I hope it can be understood what I mean. Is there a way to configure `pyls` this way?
|
0.0
|
aa8e1b2c12759470c18910c61f55ab4c6b80d344
|
[
"test/test_language_server.py::test_initialize",
"test/test_language_server.py::test_missing_message",
"test/test_language_server.py::test_linting"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-09-04 22:54:53+00:00
|
mit
| 4,446 |
|
palantir__python-language-server-146
|
diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py
index 668096d..2a5baab 100644
--- a/pyls/plugins/pycodestyle_lint.py
+++ b/pyls/plugins/pycodestyle_lint.py
@@ -21,12 +21,29 @@ def pyls_lint(config, document):
conf_to_use = pycodestyle_conf if pycodestyle_conf else pep8_conf
conf = {k.replace("-", "_"): v for k, v in conf_to_use.items()}
- log.debug("Got pycodestyle config: %s", conf)
# Grab the pycodestyle parser and set the defaults based on the config we found
parser = pycodestyle.get_parser()
parser.set_defaults(**conf)
- opts, _args = parser.parse_args([])
+
+ # Override with any options set in the language server config
+ argv = []
+ ls_conf = config.plugin_settings('pycodestyle')
+ if ls_conf.get('exclude') is not None:
+ argv.extend(['--exclude', ','.join(ls_conf['exclude'])])
+ if ls_conf.get('filename') is not None:
+ argv.extend(['--filename', ','.join(ls_conf['filename'])])
+ if ls_conf.get('select') is not None:
+ argv.extend(['--select', ','.join(ls_conf['select'])])
+ if ls_conf.get('ignore') is not None:
+ argv.extend(['--ignore', ','.join(ls_conf['ignore'])])
+ if ls_conf.get('maxLineLength') is not None:
+ argv.extend(['--max-line-length', str(ls_conf['maxLineLength'])])
+ if ls_conf.get('hangClosing'):
+ argv.extend(['--hang-closing'])
+
+ opts, _args = parser.parse_args(argv)
+ log.debug("Got pycodestyle config: %s", opts)
styleguide = pycodestyle.StyleGuide(vars(opts))
c = pycodestyle.Checker(
diff --git a/vscode-client/package.json b/vscode-client/package.json
index b75ce25..5c8b285 100644
--- a/vscode-client/package.json
+++ b/vscode-client/package.json
@@ -70,6 +70,52 @@
"default": true,
"description": "Enable or disable the plugin."
},
+ "pyls.plugins.pycodestyle.exclude": {
+ "type": "array",
+ "default": null,
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true,
+ "description": "Exclude files or directories which match these patterns."
+ },
+ "pyls.plugins.pycodestyle.filename": {
+ "type": "array",
+ "default": null,
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true,
+ "description": "When parsing directories, only check filenames matching these patterns."
+ },
+ "pyls.plugins.pycodestyle.select": {
+ "type": "array",
+ "default": null,
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true,
+ "description": "Select errors and warnings"
+ },
+ "pyls.plugins.pycodestyle.ignore": {
+ "type": "array",
+ "default": null,
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true,
+ "description": "Ignore errors and warnings"
+ },
+ "pyls.plugins.pycodestyle.hangClosing": {
+ "type": "boolean",
+ "default": null,
+ "description": "Hang closing bracket instead of matching indentation of opening bracket's line."
+ },
+ "pyls.plugins.pycodestyle.maxLineLength": {
+ "type": "number",
+ "default": null,
+ "description": "Set maximum allowed line length."
+ },
"pyls.plugins.pydocstyle.enabled": {
"type": "boolean",
"default": false,
|
palantir/python-language-server
|
a1bbd401621f8fc3083f4341b7912b491e82bf68
|
diff --git a/test/plugins/test_lint.py b/test/plugins/test_lint.py
index a760c68..a02d992 100644
--- a/test/plugins/test_lint.py
+++ b/test/plugins/test_lint.py
@@ -96,6 +96,12 @@ def test_pycodestyle_config(workspace):
os.unlink(os.path.join(workspace.root_path, conf_file))
+ # Make sure we can ignore via the PYLS config as well
+ config.update({'plugins': {'pycodestyle': {'ignore': ['W191']}}})
+ # And make sure we don't get any warnings
+ diags = pycodestyle_lint.pyls_lint(config, doc)
+ assert not [d for d in diags if d['code'] == 'W191']
+
def test_pydocstyle():
doc = Document(DOC_URI, DOC)
|
Custom pycodestyle configuration via xcode/Atom settings
It would be really great to configure the ignored error codes and the maximum line length of `pycodestyle` via the settings if no configuration file is found on the system.
This is the most requested issue on the Atom package for `pyls`: https://github.com/lgeiger/ide-python/issues/9
|
0.0
|
a1bbd401621f8fc3083f4341b7912b491e82bf68
|
[
"test/plugins/test_lint.py::test_pycodestyle_config"
] |
[
"test/plugins/test_lint.py::test_mccabe",
"test/plugins/test_lint.py::test_pycodestyle",
"test/plugins/test_lint.py::test_pyflakes",
"test/plugins/test_lint.py::test_syntax_error_pyflakes"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-09-29 21:10:31+00:00
|
mit
| 4,447 |
|
palantir__python-language-server-211
|
diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py
index 8441281..96efafd 100644
--- a/pyls/plugins/pycodestyle_lint.py
+++ b/pyls/plugins/pycodestyle_lint.py
@@ -12,12 +12,12 @@ def pyls_lint(config, document):
log.debug("Got pycodestyle settings: %s", settings)
opts = {
- 'exclude': ','.join(settings.get('exclude') or []),
- 'filename': ','.join(settings.get('filename') or []),
+ 'exclude': settings.get('exclude'),
+ 'filename': settings.get('filename'),
'hang_closing': settings.get('hangClosing'),
- 'ignore': ','.join(settings.get('ignore') or []),
+ 'ignore': settings.get('ignore'),
'max_line_length': settings.get('maxLineLength'),
- 'select': ','.join(settings.get('select') or []),
+ 'select': settings.get('select'),
}
kwargs = {k: v for k, v in opts.items() if v}
styleguide = pycodestyle.StyleGuide(kwargs)
|
palantir/python-language-server
|
897980b7e2bd71811311cb49b18cf89ed3aa9cbe
|
diff --git a/test/plugins/test_pycodestyle_lint.py b/test/plugins/test_pycodestyle_lint.py
index 028997f..583da79 100644
--- a/test/plugins/test_pycodestyle_lint.py
+++ b/test/plugins/test_pycodestyle_lint.py
@@ -8,7 +8,7 @@ from pyls.plugins import pycodestyle_lint
DOC_URI = uris.from_fs_path(__file__)
DOC = """import sys
-def hello():
+def hello( ):
\tpass
import json
@@ -40,6 +40,14 @@ def test_pycodestyle(config):
assert mod_import['range']['start'] == {'line': 7, 'character': 0}
assert mod_import['range']['end'] == {'line': 7, 'character': 1}
+ msg = "E201 whitespace after '('"
+ mod_import = [d for d in diags if d['message'] == msg][0]
+
+ assert mod_import['code'] == 'E201'
+ assert mod_import['severity'] == lsp.DiagnosticSeverity.Warning
+ assert mod_import['range']['start'] == {'line': 2, 'character': 10}
+ assert mod_import['range']['end'] == {'line': 2, 'character': 14}
+
def test_pycodestyle_config(workspace):
""" Test that we load config files properly.
@@ -66,7 +74,7 @@ def test_pycodestyle_config(workspace):
assert [d for d in diags if d['code'] == 'W191']
content = {
- 'setup.cfg': ('[pycodestyle]\nignore = W191', True),
+ 'setup.cfg': ('[pycodestyle]\nignore = W191, E201', True),
'tox.ini': ('', False)
}
@@ -77,18 +85,16 @@ def test_pycodestyle_config(workspace):
# And make sure we don't get any warnings
diags = pycodestyle_lint.pyls_lint(config, doc)
- assert len([d for d in diags if d['code'] == 'W191']) == 0 if working else 1
+ assert len([d for d in diags if d['code'] == 'W191']) == (0 if working else 1)
+ assert len([d for d in diags if d['code'] == 'E201']) == (0 if working else 1)
+ assert [d for d in diags if d['code'] == 'W391']
os.unlink(os.path.join(workspace.root_path, conf_file))
# Make sure we can ignore via the PYLS config as well
- config.update({'plugins': {'pycodestyle': {'ignore': ['W191']}}})
+ config.update({'plugins': {'pycodestyle': {'ignore': ['W191', 'E201']}}})
# And make sure we only get one warning
diags = pycodestyle_lint.pyls_lint(config, doc)
assert not [d for d in diags if d['code'] == 'W191']
-
- # Ignore both warnings
- config.update({'plugins': {'pycodestyle': {'ignore': ['W191', 'W391']}}})
- # And make sure we get neither
- assert not [d for d in diags if d['code'] == 'W191']
- assert not [d for d in diags if d['code'] == 'W391']
+ assert not [d for d in diags if d['code'] == 'E201']
+ assert [d for d in diags if d['code'] == 'W391']
|
Fix ignored and select settings interface with pycodestyle
On https://github.com/PyCQA/pycodestyle/pull/722 they refused to fix their interface. When passing the list arguments as `ignore` and `select` settings to `pycodestyle`, it is required to pass a python list as `["E201", "E501"]`, instead of a string `"E201,E501"`, otherwise they will cause the issue pointed on: https://github.com/tomv564/LSP/issues/244#issuecomment-358753274
|
0.0
|
897980b7e2bd71811311cb49b18cf89ed3aa9cbe
|
[
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle_config"
] |
[
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-12-26 01:15:40+00:00
|
mit
| 4,448 |
|
palantir__python-language-server-220
|
diff --git a/pyls/language_server.py b/pyls/language_server.py
index b898ae1..f310182 100644
--- a/pyls/language_server.py
+++ b/pyls/language_server.py
@@ -65,7 +65,7 @@ class LanguageServer(dispatcher.JSONRPCMethodDispatcher, JSONRPCServer):
pass
def m_initialize(self, **kwargs):
- log.debug("Language server intialized with %s", kwargs)
+ log.debug("Language server initialized with %s", kwargs)
if 'rootUri' in kwargs:
self.root_uri = kwargs['rootUri']
elif 'rootPath' in kwargs:
diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py
index d157a38..2940166 100644
--- a/pyls/plugins/pycodestyle_lint.py
+++ b/pyls/plugins/pycodestyle_lint.py
@@ -12,12 +12,12 @@ def pyls_lint(config, document):
log.debug("Got pycodestyle settings: %s", settings)
opts = {
- 'exclude': settings.get('exclude'),
- 'filename': settings.get('filename'),
+ 'exclude': ','.join(settings.get('exclude') or []),
+ 'filename': ','.join(settings.get('filename') or []),
'hang_closing': settings.get('hangClosing'),
- 'ignore': settings.get('ignore'),
+ 'ignore': ','.join(settings.get('ignore') or []),
'max_line_length': settings.get('maxLineLength'),
- 'select': settings.get('select'),
+ 'select': ','.join(settings.get('select') or []),
}
styleguide = pycodestyle.StyleGuide({k: v for k, v in opts.items() if v is not None})
@@ -27,6 +27,7 @@ def pyls_lint(config, document):
)
c.check_all()
diagnostics = c.report.diagnostics
+
return diagnostics
|
palantir/python-language-server
|
85907d6f7af5aaf1d6fd13c543327319b0c02282
|
diff --git a/test/plugins/test_pycodestyle_lint.py b/test/plugins/test_pycodestyle_lint.py
index a93b513..028997f 100644
--- a/test/plugins/test_pycodestyle_lint.py
+++ b/test/plugins/test_pycodestyle_lint.py
@@ -86,4 +86,9 @@ def test_pycodestyle_config(workspace):
# And make sure we only get one warning
diags = pycodestyle_lint.pyls_lint(config, doc)
assert not [d for d in diags if d['code'] == 'W191']
- assert [d for d in diags if d['code'] == 'W391']
+
+ # Ignore both warnings
+ config.update({'plugins': {'pycodestyle': {'ignore': ['W191', 'W391']}}})
+ # And make sure we get neither
+ assert not [d for d in diags if d['code'] == 'W191']
+ assert not [d for d in diags if d['code'] == 'W391']
|
pycodestyle config not being read, seeing runtime errors since 0.12.0
Since upgrading to 0.12.1 (from 0.11.1) I noticed that I was getting D100 and D103 docstring warnings in my code, even though I ignored them via `$HOME/.config/pycodestyle`. I tried adding a `setup.cfg` to the project directory but I still got the warnings.
When running `pyls` (as `pyls --tcp --port 9001 -v`) in TCP mode I saw the following output after loading a Python file. Could the issue I'm seeing be related to the pydocstyle error?
```
[...]
2017-12-25 11:09:59,221 UTC - INFO - pyls.config.config - Loaded pyls plugin pycodestyle from <module 'pyls.plugins.pycodestyle_lint' from '/home/terr/.local/lib/python3.5/site-packages/pyls/plugins/pycodestyle_lint.py'>
[...]
2017-12-25 11:04:15,188 UTC - INFO - pyls.config.config - Loaded pyls plugin mccabe from <module 'pyls.plugins.mccabe_lint' from '/home/terr/.local/lib/python3.5/site-packages/pyls/plugins/mccabe_lint.py'>
2017-12-25 11:04:15,188 UTC - INFO - pyls.config.config - Loaded pyls plugin jedi_signature_help from <module 'pyls.plugins.signature' from '/home/terr/.local/lib/python3.5/site-packages/pyls/plugins/signature.py'>
Usage: pydocstyle [options] [<file|dir>...]
pyls: error: no such option: --tcp
```
I'm assuming the "no such option: --tcp" error doesn't occur in IO mode. I've got the most recent version of `pydocstyle` installed, 2.1.1.
I also tried 0.12.0, where I got the same output and results (warnings about D100/D103). Version 0.11.1 still works fine, both in IO and TCP mode.
|
0.0
|
85907d6f7af5aaf1d6fd13c543327319b0c02282
|
[
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle_config"
] |
[
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-01-09 23:13:07+00:00
|
mit
| 4,449 |
|
palantir__python-language-server-275
|
diff --git a/pyls/_utils.py b/pyls/_utils.py
index fb353c3..56a8630 100644
--- a/pyls/_utils.py
+++ b/pyls/_utils.py
@@ -104,5 +104,5 @@ def format_docstring(contents):
def clip_column(column, lines, line_number):
# Normalise the position as per the LSP that accepts character positions > line length
# https://github.com/Microsoft/language-server-protocol/blob/master/protocol.md#position
- max_column = len(lines[line_number]) - 1 if len(lines) > line_number else 0
+ max_column = len(lines[line_number].rstrip('\r\n')) if len(lines) > line_number else 0
return min(column, max_column)
diff --git a/pyls/python_ls.py b/pyls/python_ls.py
index 61de356..e1c9297 100644
--- a/pyls/python_ls.py
+++ b/pyls/python_ls.py
@@ -32,7 +32,7 @@ class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
def start_tcp_lang_server(bind_addr, port, handler_class):
- if not isinstance(handler_class, PythonLanguageServer):
+ if not issubclass(handler_class, PythonLanguageServer):
raise ValueError('Handler class must be an instance of PythonLanguageServer')
# Construct a custom wrapper class around the user's handler_class
|
palantir/python-language-server
|
67811f275c8ce61dea78400e2b202bf7f5dc4707
|
diff --git a/test/test_utils.py b/test/test_utils.py
index 6675374..9d00686 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -52,6 +52,12 @@ def test_merge_dicts():
def test_clip_column():
- assert _utils.clip_column(5, ['123'], 0) == 2
- assert _utils.clip_column(2, ['\n', '123'], 1) == 2
assert _utils.clip_column(0, [], 0) == 0
+ assert _utils.clip_column(2, ['123'], 0) == 2
+ assert _utils.clip_column(3, ['123'], 0) == 3
+ assert _utils.clip_column(5, ['123'], 0) == 3
+ assert _utils.clip_column(0, ['\n', '123'], 0) == 0
+ assert _utils.clip_column(1, ['\n', '123'], 0) == 0
+ assert _utils.clip_column(2, ['123\n', '123'], 0) == 2
+ assert _utils.clip_column(3, ['123\n', '123'], 0) == 3
+ assert _utils.clip_column(4, ['123\n', '123'], 1) == 3
|
TCP mode is not working since v0.15.0
When I attempt to start the language server in TCP mode using the command
``` pyls --tcp --host 127.0.0.1 --port 7003 ```
it fails to start the language server with the error ```Handler class must be an instance of PythonLanguageServer```
|
0.0
|
67811f275c8ce61dea78400e2b202bf7f5dc4707
|
[
"test/test_utils.py::test_clip_column"
] |
[
"test/test_utils.py::test_debounce",
"test/test_utils.py::test_list_to_string",
"test/test_utils.py::test_camel_to_underscore",
"test/test_utils.py::test_find_parents",
"test/test_utils.py::test_merge_dicts"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-02-26 13:08:12+00:00
|
mit
| 4,450 |
|
palantir__python-language-server-63
|
diff --git a/pyls/__main__.py b/pyls/__main__.py
index fe9f564..b88ae07 100644
--- a/pyls/__main__.py
+++ b/pyls/__main__.py
@@ -53,4 +53,29 @@ def main():
if args.tcp:
language_server.start_tcp_lang_server(args.host, args.port, PythonLanguageServer)
else:
- language_server.start_io_lang_server(sys.stdin, sys.stdout, PythonLanguageServer)
+ stdin, stdout = _binary_stdio()
+ language_server.start_io_lang_server(stdin, stdout, PythonLanguageServer)
+
+
+def _binary_stdio():
+ """Construct binary stdio streams (not text mode).
+
+ This seems to be different for Window/Unix Python2/3, so going by:
+ https://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
+ """
+ PY3K = sys.version_info >= (3, 0)
+
+ if PY3K:
+ stdin, stdout = sys.stdin.buffer, sys.stdout.buffer
+ else:
+ # Python 2 on Windows opens sys.stdin in text mode, and
+ # binary data that read from it becomes corrupted on \r\n
+ if sys.platform == "win32":
+ # set sys.stdin to binary mode
+ import os
+ import msvcrt
+ msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ stdin, stdout = sys.stdin, sys.stdout
+
+ return stdin, stdout
diff --git a/pyls/server.py b/pyls/server.py
index 391ca99..9e3fcac 100644
--- a/pyls/server.py
+++ b/pyls/server.py
@@ -49,8 +49,8 @@ class JSONRPCServer(object):
self._write_message(req.data)
def _content_length(self, line):
- if line.startswith("Content-Length: "):
- _, value = line.split("Content-Length: ")
+ if line.startswith(b'Content-Length: '):
+ _, value = line.split(b'Content-Length: ')
value = value.strip()
try:
return int(value)
@@ -83,5 +83,5 @@ class JSONRPCServer(object):
"Content-Type: application/vscode-jsonrpc; charset=utf8\r\n\r\n"
"{}".format(content_length, body)
)
- self.wfile.write(response)
+ self.wfile.write(response.encode('utf-8'))
self.wfile.flush()
|
palantir/python-language-server
|
8c038c38106bae4d04faf88fa53755e263cfd586
|
diff --git a/test/test_language_server.py b/test/test_language_server.py
index b1a68a5..7f0de1b 100644
--- a/test/test_language_server.py
+++ b/test/test_language_server.py
@@ -28,12 +28,12 @@ def client_server():
scr, scw = os.pipe()
server = Thread(target=start_io_lang_server, args=(
- os.fdopen(csr), os.fdopen(scw, 'w'), PythonLanguageServer
+ os.fdopen(csr, 'rb'), os.fdopen(scw, 'wb'), PythonLanguageServer
))
server.daemon = True
server.start()
- client = JSONRPCClient(os.fdopen(scr), os.fdopen(csw, 'w'))
+ client = JSONRPCClient(os.fdopen(scr, 'rb'), os.fdopen(csw, 'wb'))
yield client, server
@@ -95,10 +95,10 @@ def test_linting(client_server):
def _get_notification(client):
- request = jsonrpc.jsonrpc.JSONRPCRequest.from_json(client._read_message())
+ request = jsonrpc.jsonrpc.JSONRPCRequest.from_json(client._read_message().decode('utf-8'))
assert request.is_notification
return request.data
def _get_response(client):
- return json.loads(client._read_message())
+ return json.loads(client._read_message().decode('utf-8'))
|
Newline issues on Windows
Hi there,
Thanks for the excellent work you've done on the language server! Overall, it's working great on OSX and Linux - I'm working on incorporating it into Oni: https://github.com/extr0py/oni
There is one blocking issue on Windows, however:
**Issue:** The LSP protocol expects there to be `\r\n\r\n` following the Content-Header. This works as expected on OSX and Linux. However, on Windows, we actually get `\r\r\n\r\r\n`, which some LSP clients will not handle this. `vscode-jsonrpc` is strict on matching this and therefore never realizes the stream is complete, and never completes initialization. I've tested this using the _stdio_ strategy - I haven't validated with the _tcp_ strategy.
**Defect:** It turns out Python has some 'magic' behavior with newlines - see https://stackoverflow.com/questions/2536545/how-to-write-unix-end-of-line-characters-in-windows-using-python. It looks like Python is converting `\n` on Windows to `os.linesep` - which is `\r\n`.
This is the impacted code (`server.py`):
```
def _write_message(self, msg):
body = json.dumps(msg, separators=(",", ":"))
content_length = len(body)
response = (
"Content-Length: {}\r\n"
"Content-Type: application/vscode-jsonrpc; charset=utf8\r\n\r\n"
"{}".format(content_length, body)
)
self.wfile.write(response)
self.wfile.flush()
```
In this case, the `\n` in the `\r\n` is getting expanded - so we actually end up with `\r\n`.
**Proposed Fix**: The `os.linesep` should be checked. If it is `\n`, we should use `\r\n` as the line ending to conform to the protocol. If it is `\r\n`, that means `\n` will be expanded to `\r\n`, so we should use `\n` as the terminator above.
I'm not an expert in Python, so there may be a cleaner way to fix this. It looks like when reading from a file, there are options in terms of handling newlines, so maybe there is an option to set when writing to the output. I'm not sure as well if this would cause problems with the tcp server.
Let me know if this isn't clear - happy to give more info.
|
0.0
|
8c038c38106bae4d04faf88fa53755e263cfd586
|
[
"test/test_language_server.py::test_initialize",
"test/test_language_server.py::test_missing_message",
"test/test_language_server.py::test_linting"
] |
[
"test/test_language_server.py::test_file_closed"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-06-27 19:20:58+00:00
|
mit
| 4,451 |
|
palantir__python-language-server-652
|
diff --git a/pyls/workspace.py b/pyls/workspace.py
index eaebc12..0ffab25 100644
--- a/pyls/workspace.py
+++ b/pyls/workspace.py
@@ -88,8 +88,8 @@ class Workspace(object):
def source_roots(self, document_path):
"""Return the source roots for the given document."""
- files = _utils.find_parents(self._root_path, document_path, ['setup.py']) or []
- return [os.path.dirname(setup_py) for setup_py in files]
+ files = _utils.find_parents(self._root_path, document_path, ['setup.py', 'pyproject.toml']) or []
+ return list(set((os.path.dirname(project_file) for project_file in files))) or [self._root_path]
def _create_document(self, doc_uri, source=None, version=None):
path = uris.to_fs_path(doc_uri)
|
palantir/python-language-server
|
fa486efaf1a2bf64f70d03537cb07ba465256192
|
diff --git a/test/test_workspace.py b/test/test_workspace.py
index f510eaa..9b5b7b0 100644
--- a/test/test_workspace.py
+++ b/test/test_workspace.py
@@ -1,7 +1,9 @@
# Copyright 2017 Palantir Technologies, Inc.
import os
-import os.path as osp
import sys
+
+import pytest
+
from pyls import uris
PY2 = sys.version_info.major == 2
@@ -16,7 +18,7 @@ DOC_URI = uris.from_fs_path(__file__)
def path_as_uri(path):
- return pathlib.Path(osp.abspath(path)).as_uri()
+ return pathlib.Path(os.path.abspath(path)).as_uri()
def test_local(pyls):
@@ -49,14 +51,16 @@ def test_rm_document(pyls):
assert pyls.workspace.get_document(DOC_URI)._source is None
-def test_non_root_project(pyls):
[email protected]('metafiles', [('setup.py',), ('pyproject.toml',), ('setup.py', 'pyproject.toml')])
+def test_non_root_project(pyls, metafiles):
repo_root = os.path.join(pyls.workspace.root_path, 'repo-root')
os.mkdir(repo_root)
project_root = os.path.join(repo_root, 'project-root')
os.mkdir(project_root)
- with open(os.path.join(project_root, 'setup.py'), 'w+') as f:
- f.write('# setup.py')
+ for metafile in metafiles:
+ with open(os.path.join(project_root, metafile), 'w+') as f:
+ f.write('# ' + metafile)
test_uri = uris.from_fs_path(os.path.join(project_root, 'hello/test.py'))
pyls.workspace.put_document(test_uri, 'assert True')
@@ -64,6 +68,15 @@ def test_non_root_project(pyls):
assert project_root in test_doc.sys_path()
+def test_root_project_with_no_setup_py(pyls):
+ """Default to workspace root."""
+ workspace_root = pyls.workspace.root_path
+ test_uri = uris.from_fs_path(os.path.join(workspace_root, 'hello/test.py'))
+ pyls.workspace.put_document(test_uri, 'assert True')
+ test_doc = pyls.workspace.get_document(test_uri)
+ assert workspace_root in test_doc.sys_path()
+
+
def test_multiple_workspaces(tmpdir, pyls):
workspace1_dir = tmpdir.mkdir('workspace1')
workspace2_dir = tmpdir.mkdir('workspace2')
|
No default source root
If no `setup.py` is found, `Workspace.source_roots()` currently returns `[]`.
I think a preferable alternative would be to return the Workspace root (as the language client may have other means for detecting a source root).
|
0.0
|
fa486efaf1a2bf64f70d03537cb07ba465256192
|
[
"test/test_workspace.py::test_non_root_project[metafiles1]",
"test/test_workspace.py::test_root_project_with_no_setup_py"
] |
[
"test/test_workspace.py::test_local",
"test/test_workspace.py::test_put_document",
"test/test_workspace.py::test_get_document",
"test/test_workspace.py::test_get_missing_document",
"test/test_workspace.py::test_rm_document",
"test/test_workspace.py::test_non_root_project[metafiles0]",
"test/test_workspace.py::test_non_root_project[metafiles2]",
"test/test_workspace.py::test_multiple_workspaces"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-05 07:13:32+00:00
|
mit
| 4,452 |
|
palantir__python-language-server-7
|
diff --git a/circle.yml b/circle.yml
new file mode 100644
index 0000000..d66bb32
--- /dev/null
+++ b/circle.yml
@@ -0,0 +1,3 @@
+machine:
+ post:
+ - pyenv global 2.7.11 3.6.0
diff --git a/pyls/providers/code_lens.py b/pyls/providers/code_lens.py
deleted file mode 100644
index 3e97e68..0000000
--- a/pyls/providers/code_lens.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2017 Palantir Technologies, Inc.
-import logging
-from .base import JediProvider
-
-log = logging.getLogger(__name__)
-
-
-class JediReferencesProvider(JediProvider):
- """ Returns the references to whatever is under the cursor, if it's known """
-
- def run(self, doc_uri, position, exclude_declaration=False):
- usages = self.jedi_script(doc_uri, position).usages()
-
- if exclude_declaration:
- # Filter out if the usage is the actual declaration of the thing
- usages = [d for d in usages if not d.is_definition()]
-
- return [{
- 'uri': self.workspace.get_uri_like(doc_uri, d.module_path),
- 'range': {
- 'start': {'line': d.line - 1, 'character': d.column},
- 'end': {'line': d.line - 1, 'character': d.column + len(d.name)}
- }
- } for d in usages]
diff --git a/pyls/providers/completion.py b/pyls/providers/completion.py
index e432808..584fd6a 100644
--- a/pyls/providers/completion.py
+++ b/pyls/providers/completion.py
@@ -28,13 +28,11 @@ def sort_text(definition):
""" Ensure builtins appear at the bottom.
Description is of format <type>: <module>.<item>
"""
- mod = definition.description.split(":", 1)[1].strip()
-
if definition.in_builtin_module():
# It's a builtin, put it last
return 'z' + definition.name
- if '.' in mod and mod.rsplit(".", 1)[1].startswith("_"):
+ if definition.name.startswith("_"):
# It's a 'hidden' func, put it next last
return 'y' + definition.name
diff --git a/pyls/providers/hover.py b/pyls/providers/hover.py
index dc28016..74b7afa 100644
--- a/pyls/providers/hover.py
+++ b/pyls/providers/hover.py
@@ -9,16 +9,16 @@ class JediDocStringHoverProvider(JediProvider):
""" Displays the docstring of whatever is under the cursor, if it's known """
def run(self, doc_uri, position):
- completions = self.jedi_script(doc_uri, position).completions()
+ definitions = self.jedi_script(doc_uri, position).goto_definitions()
document = self.workspace.get_document(doc_uri)
word = document.word_at_position(position)
# Find an exact match for a completion
- completions = [c for c in completions if c.name == word]
+ definitions = [d for d in definitions if d.name == word]
- if len(completions) == 0:
+ if len(definitions) == 0:
# :(
return {'contents': ''}
# Maybe the docstring could be huuuuuuuuuuge...
- return {'contents': completions[0].docstring() or ""}
+ return {'contents': definitions[0].docstring() or ""}
|
palantir/python-language-server
|
9ea92b5efb264bc564f60bfc7475883e60944bd0
|
diff --git a/test/providers/test_hover.py b/test/providers/test_hover.py
index 107bd31..596e014 100644
--- a/test/providers/test_hover.py
+++ b/test/providers/test_hover.py
@@ -2,17 +2,17 @@
from pyls.providers.hover import JediDocStringHoverProvider
DOC_URI = __file__
-DOC = """import sys
+DOC = """
def main():
- print sys.stdin.read()
- raise Exception()
+ \"\"\"hello world\"\"\"
+ pass
"""
def test_hover(workspace):
- # Over 'Exception' in raise Exception()
- hov_position = {'line': 4, 'character': 17}
+ # Over 'main' in def main():
+ hov_position = {'line': 2, 'character': 6}
# Over the blank second line
no_hov_position = {'line': 1, 'character': 0}
@@ -20,7 +20,7 @@ def test_hover(workspace):
provider = JediDocStringHoverProvider(workspace)
assert {
- 'contents': 'Common base class for all non-exit exceptions.'
+ 'contents': 'main()\n\nhello world'
} == provider.run(DOC_URI, hov_position)
assert {'contents': ''} == provider.run(DOC_URI, no_hov_position)
|
Implement continuous integration with CircleCi
|
0.0
|
9ea92b5efb264bc564f60bfc7475883e60944bd0
|
[
"test/providers/test_hover.py::test_hover"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_removed_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-01-30 15:17:16+00:00
|
mit
| 4,453 |
|
palantir__python-language-server-754
|
diff --git a/pyls/python_ls.py b/pyls/python_ls.py
index 577675f..8d6c5f0 100644
--- a/pyls/python_ls.py
+++ b/pyls/python_ls.py
@@ -361,14 +361,37 @@ class PythonLanguageServer(MethodDispatcher):
for doc_uri in workspace.documents:
self.lint(doc_uri, is_saved=False)
- def m_workspace__did_change_workspace_folders(self, added=None, removed=None, **_kwargs):
+ def m_workspace__did_change_workspace_folders(self, event=None, **_kwargs): # pylint: disable=too-many-locals
+ if event is None:
+ return
+ added = event.get('added', [])
+ removed = event.get('removed', [])
+
for removed_info in removed:
- removed_uri = removed_info['uri']
- self.workspaces.pop(removed_uri)
+ if 'uri' in removed_info:
+ removed_uri = removed_info['uri']
+ self.workspaces.pop(removed_uri, None)
for added_info in added:
- added_uri = added_info['uri']
- self.workspaces[added_uri] = Workspace(added_uri, self._endpoint, self.config)
+ if 'uri' in added_info:
+ added_uri = added_info['uri']
+ self.workspaces[added_uri] = Workspace(added_uri, self._endpoint, self.config)
+
+ root_workspace_removed = any(removed_info['uri'] == self.root_uri for removed_info in removed)
+ workspace_added = len(added) > 0 and 'uri' in added[0]
+ if root_workspace_removed and workspace_added:
+ added_uri = added[0]['uri']
+ self.root_uri = added_uri
+ self.workspace = self.workspaces[added_uri]
+ elif root_workspace_removed:
+ # NOTE: Removing the root workspace can only happen when the server
+ # is closed, thus the else condition of this if can never happen.
+ if self.workspaces:
+ log.debug('Root workspace deleted!')
+ available_workspaces = sorted(self.workspaces)
+ first_workspace = available_workspaces[0]
+ self.root_uri = first_workspace
+ self.workspace = self.workspaces[first_workspace]
# Migrate documents that are on the root workspace and have a better
# match now
|
palantir/python-language-server
|
462893cfae35229271524aec55c50a112aaddae3
|
diff --git a/test/test_workspace.py b/test/test_workspace.py
index 9b5b7b0..6ecdfbf 100644
--- a/test/test_workspace.py
+++ b/test/test_workspace.py
@@ -96,8 +96,8 @@ def test_multiple_workspaces(tmpdir, pyls):
added_workspaces = [{'uri': path_as_uri(str(x))}
for x in (workspace1_dir, workspace2_dir)]
- pyls.m_workspace__did_change_workspace_folders(
- added=added_workspaces, removed=[])
+ event = {'added': added_workspaces, 'removed': []}
+ pyls.m_workspace__did_change_workspace_folders(event)
for workspace in added_workspaces:
assert workspace['uri'] in pyls.workspaces
@@ -116,6 +116,84 @@ def test_multiple_workspaces(tmpdir, pyls):
workspace2_uri = added_workspaces[1]['uri']
assert msg['uri'] in pyls.workspaces[workspace2_uri]._docs
- pyls.m_workspace__did_change_workspace_folders(
- added=[], removed=[added_workspaces[0]])
+ event = {'added': [], 'removed': [added_workspaces[0]]}
+ pyls.m_workspace__did_change_workspace_folders(event)
assert workspace1_uri not in pyls.workspaces
+
+
+def test_multiple_workspaces_wrong_removed_uri(pyls):
+ workspace = {'uri': 'Test123'}
+ event = {'added': [], 'removed': [workspace]}
+ pyls.m_workspace__did_change_workspace_folders(event)
+ assert workspace['uri'] not in pyls.workspaces
+
+
+def test_root_workspace_changed(pyls):
+ test_uri = 'Test123'
+ pyls.root_uri = test_uri
+ pyls.workspace._root_uri = test_uri
+
+ workspace1 = {'uri': test_uri}
+ workspace2 = {'uri': 'NewTest456'}
+
+ event = {'added': [workspace2], 'removed': [workspace1]}
+ pyls.m_workspace__did_change_workspace_folders(event)
+
+ assert workspace2['uri'] == pyls.workspace._root_uri
+ assert workspace2['uri'] == pyls.root_uri
+
+
+def test_root_workspace_not_changed(pyls):
+ # removed uri != root_uri
+ test_uri_1 = 'Test12'
+ pyls.root_uri = test_uri_1
+ pyls.workspace._root_uri = test_uri_1
+ workspace1 = {'uri': 'Test1234'}
+ workspace2 = {'uri': 'NewTest456'}
+ event = {'added': [workspace2], 'removed': [workspace1]}
+ pyls.m_workspace__did_change_workspace_folders(event)
+ assert test_uri_1 == pyls.workspace._root_uri
+ assert test_uri_1 == pyls.root_uri
+ # empty 'added' list
+ test_uri_2 = 'Test123'
+ new_root_uri = workspace2['uri']
+ pyls.root_uri = test_uri_2
+ pyls.workspace._root_uri = test_uri_2
+ workspace1 = {'uri': test_uri_2}
+ event = {'added': [], 'removed': [workspace1]}
+ pyls.m_workspace__did_change_workspace_folders(event)
+ assert new_root_uri == pyls.workspace._root_uri
+ assert new_root_uri == pyls.root_uri
+ # empty 'removed' list
+ event = {'added': [workspace1], 'removed': []}
+ pyls.m_workspace__did_change_workspace_folders(event)
+ assert new_root_uri == pyls.workspace._root_uri
+ assert new_root_uri == pyls.root_uri
+ # 'added' list has no 'uri'
+ workspace2 = {'TESTuri': 'Test1234'}
+ event = {'added': [workspace2], 'removed': [workspace1]}
+ pyls.m_workspace__did_change_workspace_folders(event)
+ assert new_root_uri == pyls.workspace._root_uri
+ assert new_root_uri == pyls.root_uri
+
+
+def test_root_workspace_removed(tmpdir, pyls):
+ workspace1_dir = tmpdir.mkdir('workspace1')
+ workspace2_dir = tmpdir.mkdir('workspace2')
+ root_uri = pyls.root_uri
+
+ # Add workspaces to the pyls
+ added_workspaces = [{'uri': path_as_uri(str(x))}
+ for x in (workspace1_dir, workspace2_dir)]
+ event = {'added': added_workspaces, 'removed': []}
+ pyls.m_workspace__did_change_workspace_folders(event)
+
+ # Remove the root workspace
+ removed_workspaces = [{'uri': root_uri}]
+ event = {'added': [], 'removed': removed_workspaces}
+ pyls.m_workspace__did_change_workspace_folders(event)
+
+ # Assert that the first of the workspaces (in alphabetical order) is now
+ # the root workspace
+ assert pyls.root_uri == path_as_uri(str(workspace1_dir))
+ assert pyls.workspace._root_uri == path_as_uri(str(workspace1_dir))
|
Workspace folders not implemented completely
The signature for didChangeWorkspaceFolders
```python
def m_workspace__did_change_workspace_folders(self, added=None, removed=None, **_kwargs):
```
assumes `added` and `removed` are children of the `params` field, but according to the spec they should be nested under `event`? (See https://microsoft.github.io/language-server-protocol/specifications/specification-3-14/#workspace_didChangeWorkspaceFolders)
Results in:
```
:: --> pyls workspace/didChangeWorkspaceFolders: {'event': {'removed': [], 'added': []}}
pyls: 2019-12-19 18:50:04,738 UTC - ERROR - pyls_jsonrpc.endpoint - Failed to handle notification workspace/didChangeWorkspaceFolders: {'event': {'removed': [], 'added': []}}
pyls: Traceback (most recent call last):
pyls: File "/usr/local/lib/python3.7/site-packages/pyls_jsonrpc/endpoint.py", line 142, in _handle_notification
pyls: handler_result = handler(params)
pyls: File "/usr/local/lib/python3.7/site-packages/pyls_jsonrpc/dispatchers.py", line 23, in handler
pyls: return method(**(params or {}))
pyls: File "/usr/local/lib/python3.7/site-packages/pyls/python_ls.py", line 363, in m_workspace__did_change_workspace_folders
pyls: for removed_info in removed:
pyls: TypeError: 'NoneType' object is not iterable
```
The server also does not take the initial list of workspaces from `initialize` - it only creates a single workspace for `rootUri`. Sending a `didChangeWorkspaceFolders` with an entry in `removed` that is not the _rootUri_ will result in a KeyError.
|
0.0
|
462893cfae35229271524aec55c50a112aaddae3
|
[
"test/test_workspace.py::test_multiple_workspaces",
"test/test_workspace.py::test_multiple_workspaces_wrong_removed_uri",
"test/test_workspace.py::test_root_workspace_changed",
"test/test_workspace.py::test_root_workspace_not_changed",
"test/test_workspace.py::test_root_workspace_removed"
] |
[
"test/test_workspace.py::test_local",
"test/test_workspace.py::test_put_document",
"test/test_workspace.py::test_get_document",
"test/test_workspace.py::test_get_missing_document",
"test/test_workspace.py::test_rm_document",
"test/test_workspace.py::test_non_root_project[metafiles0]",
"test/test_workspace.py::test_non_root_project[metafiles1]",
"test/test_workspace.py::test_non_root_project[metafiles2]",
"test/test_workspace.py::test_root_project_with_no_setup_py"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-25 17:12:47+00:00
|
mit
| 4,454 |
|
palazzem__pysettings-12
|
diff --git a/README.md b/README.md
index cfb6c5a..3f4e408 100644
--- a/README.md
+++ b/README.md
@@ -105,6 +105,54 @@ settings.description = "Dry run mode!"
settings.is_valid() # raises ConfigNotValid exception
```
+### Test your Settings (pytest)
+
+If you need to change some of your settings during tests, you can use the following snippet
+to restore the previous settings after each test:
+
+```python
+# tests/fixtures/settings.py
+from pysettings.base import BaseSettings
+from pysettings.options import Option
+from pysettings.validators import is_https_url
+
+# Class definition
+class TestSettings(BaseSettings):
+ url = Option(validators=[is_https_url])
+ description = Option()
+
+# Use settings in your application
+settings = TestSettings()
+settings.url = "https://example.com"
+settings.description = "A shiny Website!"
+settings.is_valid()
+
+# tests/conftest.py
+import copy
+import pytest
+
+from .fixtures import settings as config
+
+
[email protected]
+def settings():
+ previous_config = copy.deepcopy(config.settings)
+ yield config.settings
+ config.settings = previous_config
+
+# tests/test_settings.py
+def test_settings_changes_1(settings):
+ assert settings.description == "A shiny Website!"
+ settings.description = "Test 1"
+ assert settings.description == "Test 1"
+
+
+def test_settings_changes_2(settings):
+ assert settings.description == "A shiny Website!"
+ settings.description = "Test 2"
+ assert settings.description == "Test 2"
+```
+
## Development
We accept external contributions even though the project is mostly designed for personal
diff --git a/pysettings/base.py b/pysettings/base.py
index 3fe864d..9a00916 100644
--- a/pysettings/base.py
+++ b/pysettings/base.py
@@ -1,3 +1,5 @@
+import copy
+
from .options import Option
from .exceptions import OptionNotAvailable, ConfigNotValid
@@ -25,7 +27,11 @@ class BaseSettings(object):
for attr, value in self.__class__.__dict__.items():
option = self._get_option(attr)
if isinstance(option, Option):
+ # Store the attribute name in the options list and copy the
+ # Option object to avoid sharing the same instance across
+ # settings instances
self._options.append(attr)
+ object.__setattr__(self, attr, copy.deepcopy(option))
def __setattr__(self, name, value):
"""Config attributes must be of type Option. This setattr() ensures that the
@@ -68,12 +74,18 @@ class BaseSettings(object):
find the attribute. In that case it means the given attribute is not
a configuration option that was defined as class attributes.
+ Internal attributes are still accessible.
+
Args:
name: the attribute name to retrieve.
Raise:
OptionNotAvailable: the configuration option is not present.
"""
- raise OptionNotAvailable("the option is not present in the current config")
+ if name.startswith("__") and name.endswith("__"):
+ # Providing access to internal attributes is required for pickle and copy
+ return object.__getattribute__(self, name)
+ else:
+ raise OptionNotAvailable("the option is not present in the current config")
def _get_option(self, name):
"""Retrieve the Option instance instead of proxying the call to retrieve
|
palazzem/pysettings
|
4c6889c0dfd6ecb81dc4fc565ea82bc18f633373
|
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..57e8978
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,11 @@
+import copy
+import pytest
+
+from .fixtures import settings as config
+
+
[email protected]
+def settings():
+ previous_config = copy.deepcopy(config.settings)
+ yield config.settings
+ config.settings = previous_config
diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/fixtures/settings.py b/tests/fixtures/settings.py
new file mode 100644
index 0000000..75761c9
--- /dev/null
+++ b/tests/fixtures/settings.py
@@ -0,0 +1,16 @@
+from pysettings.base import BaseSettings
+from pysettings.options import Option
+from pysettings.validators import is_https_url
+
+
+# Class definition
+class TestSettings(BaseSettings):
+ url = Option(validators=[is_https_url])
+ description = Option()
+
+
+# Use settings in your application
+settings = TestSettings()
+settings.url = "https://example.com"
+settings.description = "A shiny Website!"
+settings.is_valid()
diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py
new file mode 100644
index 0000000..1e15763
--- /dev/null
+++ b/tests/test_fixtures.py
@@ -0,0 +1,10 @@
+def test_settings_changes_1(settings):
+ assert settings.description == "A shiny Website!"
+ settings.description = "Test 1"
+ assert settings.description == "Test 1"
+
+
+def test_settings_changes_2(settings):
+ assert settings.description == "A shiny Website!"
+ settings.description = "Test 2"
+ assert settings.description == "Test 2"
diff --git a/tests/test_settings.py b/tests/test_settings.py
index d00d0f9..b672d3f 100644
--- a/tests/test_settings.py
+++ b/tests/test_settings.py
@@ -1,4 +1,5 @@
import pytest
+import copy
from pysettings.base import BaseSettings
from pysettings.options import Option
@@ -41,6 +42,18 @@ def test_config_get_value():
assert config.home == "test"
+def test_config_instance():
+ """Should return a new instance of the config and not use the Class options"""
+
+ class SettingsTest(BaseSettings):
+ home = Option(default="klass")
+
+ config = SettingsTest()
+ config.home = "instance"
+ assert SettingsTest.home.value == "klass"
+ assert config.home == "instance"
+
+
def test_config_set_value_not_available():
"""Should raise an exception if the option is not present"""
@@ -132,3 +145,33 @@ def test_config_is_not_valid_exception(mocker):
with pytest.raises(ConfigNotValid):
assert config.is_valid()
+
+
+def test_config_deepcopy():
+ """Should clone the configuration when deepcopy() is called"""
+
+ class SettingsTest(BaseSettings):
+ home = Option(default="original")
+
+ config = SettingsTest()
+ clone_config = copy.deepcopy(config)
+ clone_config.home = "clone"
+ # Should be different Settings
+ assert config != clone_config
+ assert config.home == "original"
+ assert clone_config.home == "clone"
+
+
+def test_config_copy():
+ """Should clone the configuration when copy() is called"""
+
+ class SettingsTest(BaseSettings):
+ home = Option(default="original")
+
+ config = SettingsTest()
+ clone_config = copy.copy(config)
+ clone_config.home = "shallow-copy"
+ # Should be different Settings but same Option (shallow copy)
+ assert config != clone_config
+ assert config.home == "shallow-copy"
+ assert clone_config.home == "shallow-copy"
|
Unable to clone/copy/deepcopy settings
### Overview
Whenever `settings.clone()` or `copy.copy(settings)` or `copy.deepcopy(settings)` are used, the action raises the following exception:
```
pysettings.exceptions.OptionNotAvailable: the option is not present in the current config
```
We should permit such actions, especially to simplify settings changes during testing.
|
0.0
|
4c6889c0dfd6ecb81dc4fc565ea82bc18f633373
|
[
"tests/test_fixtures.py::test_settings_changes_1",
"tests/test_fixtures.py::test_settings_changes_2",
"tests/test_settings.py::test_config_instance",
"tests/test_settings.py::test_config_deepcopy",
"tests/test_settings.py::test_config_copy"
] |
[
"tests/test_settings.py::test_config_constructor",
"tests/test_settings.py::test_config_set_value",
"tests/test_settings.py::test_config_get_value",
"tests/test_settings.py::test_config_set_value_not_available",
"tests/test_settings.py::test_config_get_value_not_available",
"tests/test_settings.py::test_config_is_valid_all_options",
"tests/test_settings.py::test_config_is_valid",
"tests/test_settings.py::test_config_is_not_valid",
"tests/test_settings.py::test_config_is_not_valid_exception"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-08 17:41:36+00:00
|
bsd-3-clause
| 4,455 |
|
panosz__alpha_shapes-2
|
diff --git a/alpha_shapes/alpha_shapes.py b/alpha_shapes/alpha_shapes.py
index e720a50..fe7b952 100644
--- a/alpha_shapes/alpha_shapes.py
+++ b/alpha_shapes/alpha_shapes.py
@@ -2,7 +2,7 @@
Utility module for the calculation of alpha shapes
"""
-from functools import wraps
+import warnings
import numpy as np
from matplotlib.tri import Triangulation
@@ -23,6 +23,10 @@ class OptimizationFailure(AlphaException):
pass
+class OptimizationWarnging(UserWarning):
+ pass
+
+
class Delaunay(Triangulation):
"""
Visitor sublclass of matplotlib.tri.Triangulation.
@@ -121,24 +125,39 @@ class Alpha_Shaper(Delaunay):
n_points = self.x.size
return set(range(n_points)) - set(np.ravel(simplices))
- def optimize(self):
+ def _get_minimum_fully_covering_index_of_simplices(self):
+ """
+ Return the minimum index of simplices needed to cover all vertices.
+ The set of all simplices up to this index is fully covering.
+ """
# At least N//3 triangles are needed to connect N points.
simplices = self._sorted_simplices()
n_start = len(self) // 3
n_finish = len(self)
uncovered_vertices = self._uncovered_vertices(simplices[:n_start])
+ if not uncovered_vertices:
+ return n_start
+
for n in range(n_start, n_finish):
+ simplex = simplices[n]
+ uncovered_vertices -= set(simplex)
+
if not uncovered_vertices:
- alpha_opt = 1 / np.sqrt(self._sorted_circumradii_sw()[n])
- shape = self._shape_from_simplices(simplices[:n])
- self.set_mask_at_alpha(alpha_opt)
- return alpha_opt, shape
+ return n
- simplex = simplices[n]
- for vertices in simplex:
- uncovered_vertices.discard(vertices)
+ if uncovered_vertices:
+ raise OptimizationFailure("Maybe there are duplicate points?")
- raise OptimizationFailure()
+ def optimize(self):
+ # At least N//3 triangles are needed to connect N points.
+ n_min = self._get_minimum_fully_covering_index_of_simplices()
+ print(n_min)
+ print(f"{len(self)=}")
+ alpha_opt = 1 / np.sqrt(self._sorted_circumradii_sw()[n_min]) - 1e-10
+ simplices = self._sorted_simplices()
+ shape = self._shape_from_simplices(simplices[: n_min + 1])
+ self.set_mask_at_alpha(alpha_opt)
+ return alpha_opt, shape
def set_mask_at_alpha(self, alpha: float):
"""
diff --git a/examples/simple.py b/examples/simple.py
index 767f03e..c4459d6 100644
--- a/examples/simple.py
+++ b/examples/simple.py
@@ -56,7 +56,7 @@ ax0.scatter(*zip(*points))
ax0.set_title('data')
ax1.scatter(*zip(*points))
plot_alpha_shape(ax1, alpha_shape)
-ax1.set_title(f"$\\alpha={alpha_opt:.3}$")
+ax1.set_title(f"$\\alpha_{{\\mathrm{{opt}}}}={alpha_opt:.3}$")
for ax in (ax0, ax1):
ax.set_aspect('equal')
|
panosz/alpha_shapes
|
31fb472e60461afa4bdfe8a570eb0dd9b5ba0d3f
|
diff --git a/tests/test_Alpha_Shaper.py b/tests/test_Alpha_Shaper.py
new file mode 100644
index 0000000..7824e64
--- /dev/null
+++ b/tests/test_Alpha_Shaper.py
@@ -0,0 +1,30 @@
+"""
+tests for Alpha_Shaper.py
+"""
+import numpy as np
+import pytest
+
+from alpha_shapes import Alpha_Shaper
+
+
+def test_optimization_with_strongly_shaped_points():
+ """
+ Test the optimization with strongly shaped points
+ issue #1
+ """
+ points = [
+ (363820.32, 5771887.69),
+ (363837.36, 5771916.33),
+ (363870.03, 5771951.57),
+ (363859.3, 5771943.9),
+ (363829.7, 5771861.92),
+ (363821.03, 5771850.18),
+ (363844.05, 5771928.69),
+ (363828.75, 5771906.28),
+ ]
+ shaper = Alpha_Shaper(points)
+ alpha_opt, alpha_shape = shaper.optimize()
+
+ # check that no simplex is masked
+ not_masked = np.logical_not(shaper.mask)
+ assert np.all(not_masked)
|
Bug in alpha optimization
Hey Panosz,
first of all, thank you for sharing your code. This is really helpful.
However, I found a small bug in the optimization of the parameter alpha.
Your current code for the function "optimize":
def optimize(self):
# At least N//3 triangles are needed to connect N points.
simplices = self._sorted_simplices()
n_start = len(self)//3
n_finish = len(self)+1
uncovered_vertices = self._uncovered_vertices(simplices[:n_start])
for n in range(n_start, n_finish):
if not uncovered_vertices:
alpha_opt = 1/np.sqrt(self._sorted_circumradii_sw()[n]) ########### HERE OCCURS THE PROBLEM #############
shape = self._shape_from_simplices(simplices[:n])
return alpha_opt, shape
for vertices in simplices[n]:
uncovered_vertices.discard(vertices)
raise OptimizationFailure()
In alpha_shapes.alpha_shapes.py in line 105, it says:
alpha_opt = 1/np.sqrt(self._sorted_circumradii_sw()[n])
When n takes on the last value of the for-loop, n takes on the value of the number of simplices.
Thus, n is greater by 1 than the last index from the list "simplices".
Thus, we get the error n is out of range.
For example, we have 7 simplices, then n=7. However, the last index of the list "simplices" is 6.
You can produce this error by optimizing alpha for the following list of coordinates:
[(363820.32, 5771887.69),
(363837.36, 5771916.33),
(363870.03, 5771951.57),
(363859.3, 5771943.9),
(363829.7, 5771861.92),
(363821.03, 5771850.18),
(363844.05, 5771928.69),
(363828.75, 5771906.28)]
Furthermore, you correctly stated that we need at least N//3 triangles to connect N Points.
However, when calculating the minimum number triangles, you do not take the number of points but the number of triangles as N:
n_start = len(self)//3
This does not produce an error, but might lead to a larger number of iterations. Thought I might mention it.
Here is my suggestion for fixing the two issues:
def optimize(self):
simplices = self._sorted_simplices()
# Fix for the minimum number of triangle issue
n_points = self.x.size # number of x_coordinates
n_start = n_points//3
n_finish = len(self)+1
uncovered_vertices = self._uncovered_vertices(simplices[:n_start])
for n in range(n_start, n_finish):
if not uncovered_vertices:
# Fix for the out-of-range-issue
if n == len(self):
alpha_opt = 1/np.sqrt(self._sorted_circumradii_sw()[n-1])
else:
alpha_opt = 1/np.sqrt(self._sorted_circumradii_sw()[n])
shape = self._shape_from_simplices(simplices[:n])
return alpha_opt, shape
for vertices in simplices[n]:
uncovered_vertices.discard(vertices)
raise OptimizationFailure()
Best regards
|
0.0
|
31fb472e60461afa4bdfe8a570eb0dd9b5ba0d3f
|
[
"tests/test_Alpha_Shaper.py::test_optimization_with_strongly_shaped_points"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-26 13:57:01+00:00
|
mit
| 4,456 |
|
pappasam__toml-sort-36
|
diff --git a/toml_sort/tomlsort.py b/toml_sort/tomlsort.py
index f17d54e..e43d848 100644
--- a/toml_sort/tomlsort.py
+++ b/toml_sort/tomlsort.py
@@ -280,7 +280,9 @@ class TomlSort:
new_array_value.extend(comments)
new_array_value.append(array_item)
- if not (multiline and self.format_config.trailing_comma_inline_array):
+ if len(new_array_value) != 0 and not (
+ multiline and self.format_config.trailing_comma_inline_array
+ ):
new_array_value[-1].comma = Whitespace("")
if multiline:
|
pappasam/toml-sort
|
48da498f0d8ea5f31729f28a83f4ee2ae72e0d64
|
diff --git a/tests/examples/inline.toml b/tests/examples/inline.toml
index 49a1718..f54ef3d 100644
--- a/tests/examples/inline.toml
+++ b/tests/examples/inline.toml
@@ -1,4 +1,6 @@
[b]
+z = []
+q = {}
blist=[ 'c', 'a', 'b'] # Comment
alist = [
'g',
diff --git a/tests/examples/sorted/inline-default.toml b/tests/examples/sorted/inline-default.toml
index b47b062..71e586d 100644
--- a/tests/examples/sorted/inline-default.toml
+++ b/tests/examples/sorted/inline-default.toml
@@ -13,6 +13,8 @@ test = [
]
[b]
+z = []
+q = {}
blist = ['c', 'a', 'b'] # Comment
alist = [
'g',
diff --git a/tests/examples/sorted/inline-no-comments-no-table-sort.toml b/tests/examples/sorted/inline-no-comments-no-table-sort.toml
index ad7230f..bdfc385 100644
--- a/tests/examples/sorted/inline-no-comments-no-table-sort.toml
+++ b/tests/examples/sorted/inline-no-comments-no-table-sort.toml
@@ -17,6 +17,8 @@ alist = [
'w',
]
blist = ['a', 'b', 'c']
+q = {}
+z = []
[a]
test = [
diff --git a/tests/examples/sorted/inline.toml b/tests/examples/sorted/inline.toml
index e876ac7..b0985d4 100644
--- a/tests/examples/sorted/inline.toml
+++ b/tests/examples/sorted/inline.toml
@@ -37,3 +37,5 @@ alist = [
'w' # Comment inline
]
blist = ['a', 'b', 'c'] # Comment
+q = {}
+z = []
|
IndexError exception on an empty array
TOML file:
```toml
# foo.toml
[tool.foo]
bar = []
```
Exception:
```
$ python -V
Python 3.11.0
$ toml-sort --version
0.22.0
$ toml-sort foo.toml
Traceback (most recent call last):
File "/private/tmp/foo/.venv/bin/toml-sort", line 8, in <module>
sys.exit(cli())
^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/cli.py", line 356, in cli
).sorted()
^^^^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/tomlsort.py", line 634, in sorted
sorted_toml = self.toml_doc_sorted(toml_doc)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/tomlsort.py", line 620, in toml_doc_sorted
item.key, self.toml_elements_sorted(item, sorted_document)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/tomlsort.py", line 439, in toml_elements_sorted
item.key, self.toml_elements_sorted(item, previous_item)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/tomlsort.py", line 435, in toml_elements_sorted
for item in self.sorted_children_table(original.children):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/tomlsort.py", line 387, in sorted_children_table
non_tables = self.sort_items(
^^^^^^^^^^^^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/tomlsort.py", line 359, in sort_items
item.value = self.sort_item(item.value)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/tomlsort.py", line 306, in sort_item
return self.sort_array(item, indent_depth=indent_depth)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/private/tmp/foo/.venv/lib/python3.11/site-packages/toml_sort/tomlsort.py", line 284, in sort_array
new_array_value[-1].comma = Whitespace("")
~~~~~~~~~~~~~~~^^^^
IndexError: list index out of range
```
|
0.0
|
48da498f0d8ea5f31729f28a83f4ee2ae72e0d64
|
[
"tests/test_cli.py::test_cli_defaults[inline-sorted/inline-default]",
"tests/test_cli.py::test_cli_args[inline-path_sorted3-args3]",
"tests/test_cli.py::test_cli_args[inline-path_sorted4-args4]",
"tests/test_cli.py::test_cli_args[inline-path_sorted5-args5]",
"tests/test_toml_sort.py::test_tomlsort[inline-inline-args0]"
] |
[
"tests/test_cli.py::test_cli_defaults[from-toml-lang-sorted/from-toml-lang]",
"tests/test_cli.py::test_cli_defaults[pyproject-weird-order-sorted/pyproject-weird-order]",
"tests/test_cli.py::test_cli_defaults[comment-sorted/comment-header-footer]",
"tests/test_cli.py::test_cli_args[comment-path_sorted0-args0]",
"tests/test_cli.py::test_cli_args[comment-path_sorted1-args1]",
"tests/test_cli.py::test_cli_args[comment-path_sorted2-args2]",
"tests/test_cli.py::test_multiple_files_check[multiple",
"tests/test_cli.py::test_multiple_files_check[single",
"tests/test_cli.py::test_multiple_files_check[none",
"tests/test_cli.py::test_multiple_files_in_place",
"tests/test_cli.py::test_multiple_files_and_errors[--check",
"tests/test_cli.py::test_multiple_files_and_errors[cannot",
"tests/test_cli.py::test_load_config_file_read",
"tests/test_cli.py::test_load_config_file[-expected0]",
"tests/test_cli.py::test_load_config_file[[tool.other]\\nfoo=2-expected1]",
"tests/test_cli.py::test_load_config_file[[tool.tomlsort]-expected2]",
"tests/test_cli.py::test_load_config_file[[tool.tomlsort]\\nall=true-expected3]",
"tests/test_cli.py::test_load_config_file[[tool.tomlsort]\\nspaces_before_inline_comment=4-expected4]",
"tests/test_cli.py::test_load_config_file_invalid[[tool.tomlsort]\\nunknown=2]",
"tests/test_cli.py::test_load_config_file_invalid[[tool.tomlsort]\\nall=42]",
"tests/test_toml_sort.py::test_sort_toml_is_str",
"tests/test_toml_sort.py::test_tomlsort[comment-comment-comments-preserved-args1]",
"tests/test_toml_sort.py::test_tomlsort[comment-comment-no-comments-args2]",
"tests/test_toml_sort.py::test_tomlsort[comment-comment-header-footer-args3]",
"tests/test_toml_sort.py::test_tomlsort[from-toml-lang-from-toml-lang-args4]",
"tests/test_toml_sort.py::test_tomlsort[pyproject-weird-order-pyproject-weird-order-args5]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-12-31 14:19:25+00:00
|
mit
| 4,457 |
|
paris-saclay-cds__specio-44
|
diff --git a/specio/core/functions.py b/specio/core/functions.py
index c47f79d..8185e05 100644
--- a/specio/core/functions.py
+++ b/specio/core/functions.py
@@ -144,7 +144,7 @@ def _validate_filenames(uri):
return sorted(glob.glob(os.path.expanduser(uri)))
-def _zip_spectrum(spectrum):
+def _zip_spectrum(spectrum, tol_wavelength):
"""Compress if possible several Spectrum into a single one.
Parameters
@@ -152,6 +152,10 @@ def _zip_spectrum(spectrum):
spectrum : list of Spectrum
The list of Spectrum to zip.
+ tol_wavelength : float
+ Tolerance to merge spectrum when their wavelength are slightly
+ different.
+
Returns
-------
zipped_spectrum : Spectrum or list of Spectrum
@@ -166,7 +170,8 @@ def _zip_spectrum(spectrum):
wavelength = spectrum[0].wavelength
try:
consistent_wavelength = [np.allclose(sp.wavelength,
- wavelength)
+ wavelength,
+ atol=tol_wavelength)
for sp in spectrum]
if not all(consistent_wavelength):
return spectrum
@@ -194,7 +199,7 @@ def _zip_spectrum(spectrum):
return output_spectrum
-def specread(uri, format=None, **kwargs):
+def specread(uri, format=None, tol_wavelength=1e-5, **kwargs):
"""Read spectra in a given format.
Reads spectrum from the specified file. Returns a list or a
@@ -215,6 +220,10 @@ def specread(uri, format=None, **kwargs):
The format to use to read the file. By default specio selects
the appropriate for you based on the filename and its contents.
+ tol_wavelength : float, optional
+ Tolerance to merge spectrum when their wavelength are slightly
+ different.
+
kwargs : dict
Further keyword arguments are passed to the reader. See :func:`.help`
to see what arguments are available for a particular format.
@@ -241,7 +250,7 @@ def specread(uri, format=None, **kwargs):
spectrum = _get_reader_get_data(uri, format, **kwargs)
if isinstance(spectrum, list):
- spectrum = _zip_spectrum(spectrum)
+ spectrum = _zip_spectrum(spectrum, tol_wavelength)
return spectrum
|
paris-saclay-cds/specio
|
e966bc2b7f0955631517780272b8ebd62f6c6a1b
|
diff --git a/specio/core/tests/test_functions.py b/specio/core/tests/test_functions.py
index 0cfd489..049cdbb 100644
--- a/specio/core/tests/test_functions.py
+++ b/specio/core/tests/test_functions.py
@@ -87,14 +87,25 @@ def _generate_list_spectrum(*args):
for _ in range(n_spectrum)]
+def _generate_list_spectrum_close_wavelength(*args):
+ n_wavelength = 5
+ tol = 1e-3
+ wavelength = np.arange(5) + np.random.uniform(low=-tol, high=tol)
+ return Spectrum(np.random.random(n_wavelength),
+ wavelength,
+ None)
+
+
@pytest.mark.parametrize(
- "side_effect,spectra_type,spectra_shape",
- [(_generate_spectrum_identical_wavelength, Spectrum, (10, 5)),
- (_generate_spectrum_different_wavelength_size, list, 10),
- (_generate_spectrum_different_wavelength, list, 10),
- (_generate_list_spectrum, list, 30)])
-def test_specread_consitent_wavelength(side_effect, spectra_type,
- spectra_shape, mocker):
+ "side_effect,tol_wavelength,spectra_type,spectra_shape",
+ [(_generate_spectrum_identical_wavelength, 1e-5, Spectrum, (10, 5)),
+ (_generate_spectrum_different_wavelength_size, 1e-5, list, 10),
+ (_generate_spectrum_different_wavelength, 1e-5, list, 10),
+ (_generate_list_spectrum, 1e-5, list, 30),
+ (_generate_list_spectrum_close_wavelength, 1e-2, Spectrum, (10, 5)),
+ (_generate_list_spectrum_close_wavelength, 1e-5, list, 10)])
+def test_specread_consitent_wavelength(side_effect, tol_wavelength,
+ spectra_type, spectra_shape, mocker):
# emulate that we read several file
mocker.patch('specio.core.functions._validate_filenames',
return_value=['filename' for _ in range(10)])
@@ -103,7 +114,7 @@ def test_specread_consitent_wavelength(side_effect, spectra_type,
side_effect=side_effect)
# emulate the spectrum reading
- spectra = specread('')
+ spectra = specread('', tol_wavelength=tol_wavelength)
assert isinstance(spectra, spectra_type)
if isinstance(spectra, Spectrum):
assert spectra.amplitudes.shape == spectra_shape
|
Add an argument to read_csv to merge wavelength
|
0.0
|
e966bc2b7f0955631517780272b8ebd62f6c6a1b
|
[
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_identical_wavelength-1e-05-Spectrum-spectra_shape0]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_different_wavelength_size-1e-05-list-10]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_different_wavelength-1e-05-list-10]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_list_spectrum-1e-05-list-30]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_list_spectrum_close_wavelength-0.01-Spectrum-spectra_shape4]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_list_spectrum_close_wavelength-1e-05-list-10]"
] |
[
"specio/core/tests/test_functions.py::test_help",
"specio/core/tests/test_functions.py::test_get_reader",
"specio/core/tests/test_functions.py::test_specread_single_file"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-02-01 18:25:43+00:00
|
bsd-3-clause
| 4,458 |
|
pasqal-io__Pulser-105
|
diff --git a/pulser/waveforms.py b/pulser/waveforms.py
index f8f6c08..9a126c1 100644
--- a/pulser/waveforms.py
+++ b/pulser/waveforms.py
@@ -80,6 +80,19 @@ class Waveform(ABC):
def __repr__(self):
pass
+ @abstractmethod
+ def __mul__(self, other):
+ pass
+
+ def __neg__(self):
+ return self.__mul__(-1)
+
+ def __truediv__(self, other):
+ if other == 0:
+ raise ZeroDivisionError("Can't divide a waveform by zero.")
+ else:
+ return self.__mul__(1/other)
+
def __eq__(self, other):
if not isinstance(other, Waveform):
return False
@@ -188,6 +201,9 @@ class CompositeWaveform(Waveform):
def __repr__(self):
return f'CompositeWaveform({self.duration} ns, {self._waveforms!r})'
+ def __mul__(self, other):
+ return CompositeWaveform(*(wf * other for wf in self._waveforms))
+
class CustomWaveform(Waveform):
"""A custom waveform.
@@ -200,7 +216,7 @@ class CustomWaveform(Waveform):
def __init__(self, samples):
"""Initializes a custom waveform."""
- samples_arr = np.array(samples)
+ samples_arr = np.array(samples, dtype=float)
self._samples = samples_arr
with warnings.catch_warnings():
warnings.filterwarnings("error")
@@ -231,6 +247,9 @@ class CustomWaveform(Waveform):
def __repr__(self):
return f'CustomWaveform({self.duration} ns, {self.samples!r})'
+ def __mul__(self, other):
+ return CustomWaveform(self._samples * float(other))
+
class ConstantWaveform(Waveform):
"""A waveform of constant value.
@@ -276,6 +295,9 @@ class ConstantWaveform(Waveform):
return (f"ConstantWaveform({self._duration} ns, "
+ f"{self._value:.3g} rad/µs)")
+ def __mul__(self, other):
+ return ConstantWaveform(self._duration, self._value * float(other))
+
class RampWaveform(Waveform):
"""A linear ramp waveform.
@@ -328,6 +350,10 @@ class RampWaveform(Waveform):
return (f"RampWaveform({self._duration} ns, " +
f"{self._start:.3g}->{self._stop:.3g} rad/µs)")
+ def __mul__(self, other):
+ k = float(other)
+ return RampWaveform(self._duration, self._start * k, self._stop * k)
+
class BlackmanWaveform(Waveform):
"""A Blackman window of a specified duration and area.
@@ -402,3 +428,6 @@ class BlackmanWaveform(Waveform):
def __repr__(self):
return f"BlackmanWaveform({self._duration} ns, Area: {self._area:.3g})"
+
+ def __mul__(self, other):
+ return BlackmanWaveform(self._duration, self._area * float(other))
|
pasqal-io/Pulser
|
73f38c2e549003c810c84931c56d33ff886daf16
|
diff --git a/pulser/tests/test_waveforms.py b/pulser/tests/test_waveforms.py
index 94eab68..51e6997 100644
--- a/pulser/tests/test_waveforms.py
+++ b/pulser/tests/test_waveforms.py
@@ -122,9 +122,10 @@ def test_composite():
def test_custom():
- wf = CustomWaveform(np.arange(16))
+ data = np.arange(16, dtype=float)
+ wf = CustomWaveform(data)
assert wf.__str__() == 'Custom'
- assert wf.__repr__() == f'CustomWaveform(16 ns, {np.arange(16)!r})'
+ assert wf.__repr__() == f'CustomWaveform(16 ns, {data!r})'
def test_ramp():
@@ -149,3 +150,13 @@ def test_blackman():
wf = BlackmanWaveform.from_max_val(-10, -np.pi)
assert np.isclose(wf.integral, -np.pi)
assert np.min(wf.samples) > -10
+
+
+def test_ops():
+ assert -constant == ConstantWaveform(100, 3)
+ assert ramp * 2 == RampWaveform(2e3, 10, 38)
+ assert --custom == custom
+ assert blackman / 2 == BlackmanWaveform(40, np.pi / 2)
+ assert composite * 1 == composite
+ with pytest.raises(ZeroDivisionError):
+ constant / 0
|
Special operators for waveforms
I think it would be a nice improvement to have some special operators for waveforms. I came up with these ones:
- Negation: Doing `-wf` would return a Waveform with all it's samples' sign flipped
- Multiplication: Doing `wf * 2` would multiply all it's values by 2. It doesn't make sense to multiply two waveforms.
Not so sure these make sense:
- Addition: Doing `wf + 2` would add 2 to all values. `wf1 + wf2` would concatenate two waveforms.
- Absolute value
- Power
- Modulo
Feel free to discuss the pertinence of each or add more suggestions.
|
0.0
|
73f38c2e549003c810c84931c56d33ff886daf16
|
[
"pulser/tests/test_waveforms.py::test_ops"
] |
[
"pulser/tests/test_waveforms.py::test_duration",
"pulser/tests/test_waveforms.py::test_samples",
"pulser/tests/test_waveforms.py::test_integral",
"pulser/tests/test_waveforms.py::test_draw",
"pulser/tests/test_waveforms.py::test_eq",
"pulser/tests/test_waveforms.py::test_first_last",
"pulser/tests/test_waveforms.py::test_hash",
"pulser/tests/test_waveforms.py::test_composite",
"pulser/tests/test_waveforms.py::test_custom",
"pulser/tests/test_waveforms.py::test_ramp",
"pulser/tests/test_waveforms.py::test_blackman"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-16 17:21:10+00:00
|
apache-2.0
| 4,459 |
|
pasqal-io__Pulser-144
|
diff --git a/pulser/channels.py b/pulser/channels.py
index fdfbac2..f704781 100644
--- a/pulser/channels.py
+++ b/pulser/channels.py
@@ -139,3 +139,14 @@ class Rydberg(Channel):
"""
name: ClassVar[str] = 'Rydberg'
basis: ClassVar[str] = 'ground-rydberg'
+
+
+@dataclass(init=True, repr=False, frozen=True)
+class Microwave(Channel):
+ """Microwave adressing channel.
+
+ Channel targeting the transition between two rydberg states, thus encoding
+ the 'XY' basis. See base class.
+ """
+ name: ClassVar[str] = 'Microwave'
+ basis: ClassVar[str] = 'XY'
diff --git a/pulser/devices/_mock_device.py b/pulser/devices/_mock_device.py
index c35edf7..5d8817f 100644
--- a/pulser/devices/_mock_device.py
+++ b/pulser/devices/_mock_device.py
@@ -13,7 +13,7 @@
# limitations under the License.
from pulser.devices._device_datacls import Device
-from pulser.channels import Rydberg, Raman
+from pulser.channels import Rydberg, Raman, Microwave
MockDevice = Device(
@@ -37,5 +37,8 @@ MockDevice = Device(
max_targets=2000,
clock_period=1,
min_duration=1)),
+ ("mw_global", Microwave.Global(1000, 200,
+ clock_period=1,
+ min_duration=1))
)
)
diff --git a/pulser/sequence.py b/pulser/sequence.py
index bc93576..7dab2bb 100644
--- a/pulser/sequence.py
+++ b/pulser/sequence.py
@@ -135,6 +135,7 @@ class Sequence:
self._register = register
self._device = device
+ self._in_xy = False
self._calls = [_Call("__init__", (register, device), {})]
self._channels = {}
self._schedule = {}
@@ -166,9 +167,17 @@ class Sequence:
@property
def available_channels(self):
"""Channels still available for declaration."""
- return {id: ch for id, ch in self._device.channels.items()
- if id not in self._taken_channels.values()
- or self._device == MockDevice}
+ # Show all channels if none are declared, otherwise filter depending
+ # on whether the sequence is working on XY mode
+ if not self._channels:
+ return dict(self._device.channels)
+ else:
+ # MockDevice channels can be declared multiple times
+ return {id: ch for id, ch in self._device.channels.items()
+ if (id not in self._taken_channels.values()
+ or self._device == MockDevice)
+ and (ch.basis == 'XY' if self._in_xy else ch.basis != 'XY')
+ }
def is_parametrized(self):
"""States whether the sequence is parametrized.
@@ -211,6 +220,17 @@ class Sequence:
def declare_channel(self, name, channel_id, initial_target=None):
"""Declares a new channel to the Sequence.
+ The first declared channel implicitly defines the sequence's mode of
+ operation (i.e. the underlying Hamiltonian). In particular, if the
+ first declared channel is of type ``Microwave``, the sequence will work
+ in "XY Mode" and will not allow declaration of channels that do not
+ address the 'XY' basis. Inversely, declaration of a channel of another
+ type will block the declaration of ``Microwave`` channels.
+
+ Note:
+ Regular devices only allow a channel to be declared once, but
+ ``MockDevice`` channels can be repeatedly declared if needed.
+
Args:
name (str): Unique name for the channel in the sequence.
channel_id (str): How the channel is identified in the device.
@@ -231,10 +251,20 @@ class Sequence:
if channel_id not in self._device.channels:
raise ValueError("No channel %s in the device." % channel_id)
+ ch = self._device.channels[channel_id]
if channel_id not in self.available_channels:
- raise ValueError("Channel %s is not available." % channel_id)
+ if self._in_xy and ch.basis != 'XY':
+ raise ValueError(f"Channel '{ch}' cannot work simultaneously "
+ "with the declared 'Microwave' channel."
+ )
+ elif not self._in_xy and ch.basis == 'XY':
+ raise ValueError("Channel of type 'Microwave' cannot work "
+ "simultaneously with the declared channels.")
+ else:
+ raise ValueError(f"Channel {channel_id} is not available.")
- ch = self._device.channels[channel_id]
+ if ch.basis == 'XY' and not self._in_xy:
+ self._in_xy = True
self._channels[name] = ch
self._taken_channels[name] = channel_id
self._schedule[name] = []
@@ -430,15 +460,24 @@ class Sequence:
def measure(self, basis='ground-rydberg'):
"""Measures in a valid basis.
+ Note:
+ In addition to the supported bases of the selected device, allowed
+ measurement bases will depend on the mode of operation. In
+ particular, if using ``Microwave`` channels (XY mode), only
+ measuring in the 'XY' basis is allowed. Inversely, it is not
+ possible to measure in the 'XY' basis outside of XY mode.
+
Args:
basis (str): Valid basis for measurement (consult the
``supported_bases`` attribute of the selected device for
the available options).
"""
- available = self._device.supported_bases
+ available = (self._device.supported_bases - {'XY'} if not self._in_xy
+ else {'XY'})
if basis not in available:
raise ValueError(f"The basis '{basis}' is not supported by the "
- "selected device. The available options are: "
+ "selected device and operation mode. The "
+ "available options are: "
+ ", ".join(list(available)))
if hasattr(self, "_measurement"):
diff --git a/pulser/simulation.py b/pulser/simulation.py
index 0a3cf2e..c4af049 100644
--- a/pulser/simulation.py
+++ b/pulser/simulation.py
@@ -41,11 +41,17 @@ class Simulation:
def __init__(self, sequence, sampling_rate=1.0):
"""Initialize the Simulation with a specific pulser.Sequence."""
+ supported_bases = {"ground-rydberg", "digital"}
if not isinstance(sequence, Sequence):
raise TypeError("The provided sequence has to be a valid "
"pulser.Sequence instance.")
if not sequence._schedule:
raise ValueError("The provided sequence has no declared channels.")
+ not_supported = (set(ch.basis for ch in sequence._channels.values())
+ - supported_bases)
+ if not_supported:
+ raise NotImplementedError("Sequence with unsupported bases: "
+ + "".join(not_supported))
if all(sequence._schedule[x][-1].tf == 0 for x in sequence._channels):
raise ValueError("No instructions given for the channels in the "
"sequence.")
|
pasqal-io/Pulser
|
f54e98d252a1135ed5f941159cf81705a7084992
|
diff --git a/pulser/tests/test_devices.py b/pulser/tests/test_devices.py
index 458eaac..6494f7c 100644
--- a/pulser/tests/test_devices.py
+++ b/pulser/tests/test_devices.py
@@ -46,8 +46,8 @@ def test_mock():
assert dev.max_atom_num > 1000
assert dev.min_atom_distance <= 1
assert dev.interaction_coeff == 5008713
- names = ['Rydberg', 'Raman']
- basis = ['ground-rydberg', 'digital']
+ names = ['Rydberg', 'Raman', 'Microwave']
+ basis = ['ground-rydberg', 'digital', 'XY']
for ch in dev.channels.values():
assert ch.name in names
assert ch.basis == basis[names.index(ch.name)]
diff --git a/pulser/tests/test_sequence.py b/pulser/tests/test_sequence.py
index 44bf782..90d200b 100644
--- a/pulser/tests/test_sequence.py
+++ b/pulser/tests/test_sequence.py
@@ -64,11 +64,21 @@ def test_channel_declaration():
seq2.declare_channel('ch0', 'raman_local', initial_target='q1')
seq2.declare_channel('ch1', 'rydberg_global')
seq2.declare_channel('ch2', 'rydberg_global')
- assert set(seq2.available_channels) == available_channels
+ assert set(seq2.available_channels) == available_channels - {'mw_global'}
assert seq2._taken_channels == {'ch0': 'raman_local',
'ch1': 'rydberg_global',
'ch2': 'rydberg_global'}
assert seq2._taken_channels.keys() == seq2._channels.keys()
+ with pytest.raises(ValueError, match="type 'Microwave' cannot work "):
+ seq2.declare_channel('ch3', 'mw_global')
+
+ seq2 = Sequence(reg, MockDevice)
+ seq2.declare_channel('ch0', 'mw_global')
+ assert set(seq2.available_channels) == {'mw_global'}
+ with pytest.raises(
+ ValueError,
+ match="cannot work simultaneously with the declared 'Microwave'"):
+ seq2.declare_channel('ch3', 'rydberg_global')
def test_target():
@@ -173,6 +183,27 @@ def test_align():
seq.align('ch1')
+def test_measure():
+ pulse = Pulse.ConstantPulse(500, 2, -10, 0, post_phase_shift=np.pi)
+ seq = Sequence(reg, MockDevice)
+ seq.declare_channel('ch0', 'rydberg_global')
+ assert 'XY' in MockDevice.supported_bases
+ with pytest.raises(ValueError, match='not supported'):
+ seq.measure(basis='XY')
+ seq.measure()
+ with pytest.raises(SystemError, match='already been measured'):
+ seq.measure(basis='digital')
+ with pytest.raises(SystemError, match='Nothing more can be added.'):
+ seq.add(pulse, 'ch0')
+
+ seq = Sequence(reg, MockDevice)
+ seq.declare_channel('ch0', 'mw_global')
+ assert 'digital' in MockDevice.supported_bases
+ with pytest.raises(ValueError, match='not supported'):
+ seq.measure(basis='digital')
+ seq.measure(basis='XY')
+
+
def test_str():
seq = Sequence(reg, device)
seq.declare_channel('ch0', 'raman_local', initial_target='q0')
@@ -259,13 +290,7 @@ def test_sequence():
assert seq._total_duration == 4000
- with pytest.raises(ValueError, match='not supported'):
- seq.measure(basis='computational')
seq.measure(basis='digital')
- with pytest.raises(SystemError, match='already been measured'):
- seq.measure(basis='digital')
- with pytest.raises(SystemError, match='Nothing more can be added.'):
- seq.add(pulse1, 'ch0')
with patch('matplotlib.pyplot.show'):
seq.draw()
diff --git a/pulser/tests/test_simulation.py b/pulser/tests/test_simulation.py
index b938096..0a6db14 100644
--- a/pulser/tests/test_simulation.py
+++ b/pulser/tests/test_simulation.py
@@ -18,7 +18,7 @@ import pytest
import qutip
from pulser import Sequence, Pulse, Register, Simulation
-from pulser.devices import Chadoq2
+from pulser.devices import Chadoq2, MockDevice
from pulser.waveforms import BlackmanWaveform, RampWaveform, ConstantWaveform
q_dict = {"control1": np.array([-4., 0.]),
@@ -187,12 +187,17 @@ def test_building_basis_and_projection_operators():
def test_empty_sequences():
- seq = Sequence(reg, Chadoq2)
+ seq = Sequence(reg, MockDevice)
with pytest.raises(ValueError, match='no declared channels'):
Simulation(seq)
+ seq.declare_channel("ch0", "mw_global")
+ with pytest.raises(NotImplementedError):
+ Simulation(seq)
+
+ seq = Sequence(reg, MockDevice)
+ seq.declare_channel('test', 'rydberg_local', 'target')
+ seq.declare_channel("test2", "rydberg_global")
with pytest.raises(ValueError, match='No instructions given'):
- seq.declare_channel('test', 'rydberg_local', 'target')
- seq.declare_channel("test2", "rydberg_global")
Simulation(seq)
|
Adding the MW channel and specific functionality
Adding the MW channel type for implementation of the XY Hamiltonian. Note that, unlike with the Rydberg and Raman channels, the MW channel cannot coexist with other channel types on the same Sequence, as it requires a specific initialisation procedure and works on a basis that is incompatible with the other channels. This will demand changes to the `Sequence` and `Simulation` classes.
|
0.0
|
f54e98d252a1135ed5f941159cf81705a7084992
|
[
"pulser/tests/test_sequence.py::test_channel_declaration",
"pulser/tests/test_sequence.py::test_measure",
"pulser/tests/test_simulation.py::test_empty_sequences"
] |
[
"pulser/tests/test_devices.py::test_init",
"pulser/tests/test_devices.py::test_mock",
"pulser/tests/test_devices.py::test_rydberg_blockade",
"pulser/tests/test_devices.py::test_validate_register",
"pulser/tests/test_sequence.py::test_init",
"pulser/tests/test_sequence.py::test_target",
"pulser/tests/test_sequence.py::test_delay",
"pulser/tests/test_sequence.py::test_phase",
"pulser/tests/test_sequence.py::test_align",
"pulser/tests/test_sequence.py::test_str",
"pulser/tests/test_sequence.py::test_sequence",
"pulser/tests/test_simulation.py::test_extraction_of_sequences",
"pulser/tests/test_simulation.py::test_building_basis_and_projection_operators",
"pulser/tests/test_simulation.py::test_single_atom_simulation"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-05-10 14:34:05+00:00
|
apache-2.0
| 4,460 |
|
passaH2O__dorado-25
|
diff --git a/docs/source/userguide/index.rst b/docs/source/userguide/index.rst
index 7427ad5..8efb28d 100644
--- a/docs/source/userguide/index.rst
+++ b/docs/source/userguide/index.rst
@@ -53,14 +53,6 @@ Defining the `Particles`
Defining a :obj:`dorado.particle_track.Particles` class is a key step in using `dorado` to perform particle routing. To define a set of particles, the model parameters must first be defined as described above. The `Particles` class is initialized using an instance of the model parameters. From there, particles can be generated and routed.
-.. Note::
- When :obj:`dorado.particle_track.Particles` is initialized, all of the
- routing weights are automatically calculated. This may take some time for
- larger model domains, but allows for faster particle routing when particles
- are actually moved through the domain. There is a progress bar associated
- with this process so you don't feel like Python has gotten stuck in the
- object initialization.
-
Particle Generation and Routing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/dorado/__init__.py b/dorado/__init__.py
index 2c2564f..5f2a210 100644
--- a/dorado/__init__.py
+++ b/dorado/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.4.0"
+__version__ = "2.4.1"
from . import lagrangian_walker
diff --git a/dorado/lagrangian_walker.py b/dorado/lagrangian_walker.py
index 225ca49..63f3901 100644
--- a/dorado/lagrangian_walker.py
+++ b/dorado/lagrangian_walker.py
@@ -9,7 +9,7 @@ from builtins import range, map
from math import cos
import numpy as np
from numpy.random import random
-from tqdm import tqdm
+from numpy import maximum, nansum
def random_pick_seed(choices, probs=None):
@@ -39,76 +39,63 @@ def random_pick_seed(choices, probs=None):
return choices[idx]
-def make_weight(Particles):
- """Make an array with the routing weights."""
- # local namespace function imports
- from numpy import maximum
- from numpy import nansum
- # init the weight array
- L, W = np.shape(Particles.stage)
- Particles.weight = np.zeros((L, W, 9))
- # do weighting calculation for each cell
- print('Calculating routing weights ...')
- for i in tqdm(list(range(1, L-1)), ascii=True):
- for j in list(range(1, W-1)):
- # weights for each location in domain
- # get stage values for neighboring cells
- stage_ind = Particles.stage[i-1:i+2, j-1:j+2]
+def make_weight(Particles, ind):
+ """Update weighting array with weights at this index"""
+ # get stage values for neighboring cells
+ stage_ind = Particles.stage[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2]
- # calculate surface slope weights
- weight_sfc = maximum(0,
- (Particles.stage[i, j]-stage_ind) /
- Particles.distances)
+ # calculate surface slope weights
+ weight_sfc = maximum(0,
+ (Particles.stage[ind] - stage_ind) /
+ Particles.distances)
- # calculate inertial component weights
- weight_int = maximum(0, ((Particles.qx[i, j] * Particles.jvec +
- Particles.qy[i, j] * Particles.ivec) /
- Particles.distances))
+ # calculate inertial component weights
+ weight_int = maximum(0, ((Particles.qx[ind] * Particles.jvec +
+ Particles.qy[ind] * Particles.ivec) /
+ Particles.distances))
- # get depth and cell types for neighboring cells
- depth_ind = Particles.depth[i-1:i+2, j-1:j+2]
- ct_ind = Particles.cell_type[i-1:i+2, j-1:j+2]
+ # get depth and cell types for neighboring cells
+ depth_ind = Particles.depth[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2]
+ ct_ind = Particles.cell_type[ind[0]-1:ind[0]+2, ind[1]-1:ind[1]+2]
- # set weights for cells that are too shallow, or invalid 0
- weight_sfc[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0
- weight_int[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0
+ # set weights for cells that are too shallow, or invalid 0
+ weight_sfc[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0
+ weight_int[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] = 0
- # if sum of weights is above 0 normalize by sum of weights
- if nansum(weight_sfc) > 0:
- weight_sfc = weight_sfc / nansum(weight_sfc)
+ # if sum of weights is above 0 normalize by sum of weights
+ if nansum(weight_sfc) > 0:
+ weight_sfc = weight_sfc / nansum(weight_sfc)
- # if sum of weight is above 0 normalize by sum of weights
- if nansum(weight_int) > 0:
- weight_int = weight_int / nansum(weight_int)
+ # if sum of weight is above 0 normalize by sum of weights
+ if nansum(weight_int) > 0:
+ weight_int = weight_int / nansum(weight_int)
- # define actual weight by using gamma, and weight components
- weight = Particles.gamma * weight_sfc + \
- (1 - Particles.gamma) * weight_int
+ # define actual weight by using gamma, and weight components
+ weight = Particles.gamma * weight_sfc + \
+ (1 - Particles.gamma) * weight_int
- # modify the weight by the depth and theta weighting parameter
- weight = depth_ind ** Particles.theta * weight
+ # modify the weight by the depth and theta weighting parameter
+ weight = depth_ind ** Particles.theta * weight
- # if the depth is below the minimum depth then location is not
- # considered therefore set the associated weight to nan
- weight[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] \
- = np.nan
+ # if the depth is below the minimum depth then location is not
+ # considered therefore set the associated weight to nan
+ weight[(depth_ind <= Particles.dry_depth) | (ct_ind == 2)] \
+ = np.nan
- # if it's a dead end with only nans and 0's, choose deepest cell
- if nansum(weight) <= 0:
- weight = np.zeros_like(weight)
- weight[depth_ind == np.max(depth_ind)] = 1.0
+ # if it's a dead end with only nans and 0's, choose deepest cell
+ if nansum(weight) <= 0:
+ weight = np.zeros_like(weight)
+ weight[depth_ind == np.max(depth_ind)] = 1.0
- # set weight in the true weight array
- Particles.weight[i, j, :] = weight.ravel()
-
- print('Finished routing weight calculation.')
+ # set weight in the true weight array
+ Particles.weight[ind[0], ind[1], :] = weight.ravel()
def get_weight(Particles, ind):
"""Choose new cell location given an initial location.
Function to randomly choose 1 of the surrounding 8 cells around the
- current index using the pre-calculated routing weights.
+ current index using the routing weights from make_weight.
**Inputs** :
@@ -124,6 +111,9 @@ def get_weight(Particles, ind):
New location given as a value between 1 and 8 (inclusive)
"""
+ # Check if weights have been computed for this location:
+ if nansum(Particles.weight[ind[0], ind[1], :]) <= 0:
+ make_weight(Particles, ind)
# randomly pick the new cell for the particle to move to using the
# random_pick function and the set of weights
if Particles.steepest_descent is not True:
diff --git a/dorado/particle_track.py b/dorado/particle_track.py
index 3a6886e..75d5de0 100644
--- a/dorado/particle_track.py
+++ b/dorado/particle_track.py
@@ -377,8 +377,8 @@ class Particles():
# initialize the walk_data
self.walk_data = None
- # create weights - this might take a bit of time for large domains
- lw.make_weight(self)
+ # initialize routing weights array
+ self.weight = np.zeros((self.stage.shape[0], self.stage.shape[1], 9))
# function to clear walk data if you've made a mistake while generating it
def clear_walk_data(self):
|
passaH2O/dorado
|
4ab8cc77496d52940d14c84a25a5f1acfd06d556
|
diff --git a/tests/test_lagrangian_walker.py b/tests/test_lagrangian_walker.py
index b43cb9e..a1d52d2 100644
--- a/tests/test_lagrangian_walker.py
+++ b/tests/test_lagrangian_walker.py
@@ -286,6 +286,11 @@ def test_make_weight_shallow():
ind = (1, 1)
# set seed
np.random.seed(0)
+ # do weighting calculation for each cell
+ L, W = np.shape(particles.stage)
+ for i in list(range(1, L-1)):
+ for j in list(range(1, W-1)):
+ lw.make_weight(particles, (i, j))
# make assertions about weights
# at index, index[4] (self) will be 1 while neighbors will be 0
assert particles.weight[1, 1, 4] == 1.0
@@ -328,6 +333,11 @@ def test_make_weight_equal_opportunity():
ind = (1, 1)
# set seed
np.random.seed(0)
+ # do weighting calculation for each cell
+ L, W = np.shape(particles.stage)
+ for i in list(range(1, L-1)):
+ for j in list(range(1, W-1)):
+ lw.make_weight(particles, (i, j))
# make assertions about weights
# at index, 3 neighbors will be equiprobable
assert np.sum(particles.weight[1, 1, :]) == 3.0
@@ -372,6 +382,11 @@ def test_make_weight_unequal_opportunity():
ind = (1, 1)
# set seed
np.random.seed(0)
+ # do weighting calculation for each cell
+ L, W = np.shape(particles.stage)
+ for i in list(range(1, L-1)):
+ for j in list(range(1, W-1)):
+ lw.make_weight(particles, (i, j))
# make assertions about weights
# at index, staying put index[4] higher probability than neighbors
assert particles.weight[1, 1, 4] > particles.weight[1, 1, 5]
@@ -411,6 +426,11 @@ def test_wet_boundary_no_weight():
particles = pt.Particles(tools)
# set seed
np.random.seed(0)
+ # do weighting calculation for each cell
+ L, W = np.shape(particles.stage)
+ for i in list(range(1, L-1)):
+ for j in list(range(1, W-1)):
+ lw.make_weight(particles, (i, j))
# assert weights at boundary cells should be 0
assert np.all(np.sum(particles.weight[0, :, 4]) == 0.0)
assert np.all(np.sum(particles.weight[-1, :, 4]) == 0.0)
|
Further Speeding Up Routing Weight Calculations
From #16, @wrightky said:
> So, looking at this code, I see that the main structure of the weight computation hasn't changed. We're still constructing small sub-arrays at each index, doing a few quick operations (max, cleaning, multiplications, summing), and saving the 9 resulting weights in a weight array. Surprised we hadn't tried this sooner given how much better it performs.
> It looks like the key reason the runtime scales better in this model isn't anything about how this computation works, it's how many times we do it. Originally, we performed these operations locally once for every particle at every iteration. So, the runtime scaled as Np_tracer * iterations_per_particle. Now, we perform this once for every cell, so it scales with domain size (L-2) * (W-2). I bet if you checked the example cases you benchmarked, the ratio of these values should give roughly the speedup you observed.
> One thing I wonder, though, is whether we could obtain even faster runtimes by modifying the structure of this computation itself, by switching from local array operations to global. Whatever is causing this function to take so long must be due to the fact that we're repeatedly constructing many sub-arrays in a loop and operating on them, instead of performing a few big global array operations (inside which we'd be taking advantage of all the fast numpy broadcasting stuff). In principal, if we broke up these operations (max, cleaning, multiplication, summing) into a loop over each of the D8 directions, with each being a global matrix operation, instead of a loop over each cell, we could reduce the amount of overhead repeatedly calling these functions. I don't know exactly how that scales, but it'd be the difference between time(np.max(small array)) * (L-2) * (W-2) and time(np.max(big array)) * 9. Does that scale better? Not sure.
So that is a potential route for further speeding up the routing weight calculation.
|
0.0
|
4ab8cc77496d52940d14c84a25a5f1acfd06d556
|
[
"tests/test_lagrangian_walker.py::test_make_weight_shallow",
"tests/test_lagrangian_walker.py::test_make_weight_equal_opportunity",
"tests/test_lagrangian_walker.py::test_make_weight_unequal_opportunity",
"tests/test_lagrangian_walker.py::test_wet_boundary_no_weight"
] |
[
"tests/test_lagrangian_walker.py::test_random_pick_seed",
"tests/test_lagrangian_walker.py::test_get_weight",
"tests/test_lagrangian_walker.py::test_calculate_new_ind",
"tests/test_lagrangian_walker.py::test_step_update_straight",
"tests/test_lagrangian_walker.py::test_step_update_diagonal",
"tests/test_lagrangian_walker.py::test_calc_travel_times",
"tests/test_lagrangian_walker.py::test_check_for_boundary",
"tests/test_lagrangian_walker.py::test_random_pick",
"tests/test_lagrangian_walker.py::test_get_weight_norm",
"tests/test_lagrangian_walker.py::test_get_weight_deep",
"tests/test_lagrangian_walker.py::test_make_weight_deep"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-22 22:27:27+00:00
|
mit
| 4,461 |
|
patrickfuchs__buildH-55
|
diff --git a/buildh/cli.py b/buildh/cli.py
index 5a4f220..be0ca38 100644
--- a/buildh/cli.py
+++ b/buildh/cli.py
@@ -187,6 +187,7 @@ def main():
try:
universe_woH = mda.Universe(args.topfile, args.xtc)
begin, end = check_slice_options(universe_woH, args.begin, args.end)
+ traj_file = True
except IndexError:
raise UserWarning("Slicing options are not correct.") from None
except:
@@ -197,6 +198,7 @@ def main():
universe_woH = mda.Universe(args.topfile)
begin = 0
end = 1
+ traj_file = False
except:
raise UserWarning("Can't create MDAnalysis universe with file {}"
.format(args.topfile))
@@ -219,9 +221,9 @@ def main():
if args.opdbxtc:
if core.is_allHs_present(args.defop, dic_lipid, dic_Cname2Hnames):
- core.gen_XTC_calcOP(args.opdbxtc, universe_woH, dic_OP, dic_lipid,
- dic_Cname2Hnames, dic_corresp_numres_index_dic_OP,
- begin, end)
+ core.gen_coordinates_calcOP(args.opdbxtc, universe_woH, dic_OP, dic_lipid,
+ dic_Cname2Hnames, dic_corresp_numres_index_dic_OP,
+ begin, end, traj_file)
else:
raise UserWarning("Error on the number of H's to rebuild.")
diff --git a/buildh/core.py b/buildh/core.py
index 5cc40ca..5f20e70 100644
--- a/buildh/core.py
+++ b/buildh/core.py
@@ -559,20 +559,52 @@ def fast_build_all_Hs_calc_OP(universe_woH, begin, end,
print()
-def gen_XTC_calcOP(basename, universe_woH, dic_OP, dic_lipid,
- dic_Cname2Hnames, dic_corresp_numres_index_dic_OP,
- begin, end):
+def gen_coordinates_calcOP(basename, universe_woH, dic_OP, dic_lipid,
+ dic_Cname2Hnames, dic_corresp_numres_index_dic_OP,
+ begin, end, traj_file):
"""
- Generate a new trajectory with computed hydrogens
+ Generate coordinates files (pdb and/or xtc) with computed hydrogens
and compute the order parameter.
- """
+ If `traj_file` is set to False, only a pdb file will be written.
+ This depends whether or not the user supplied a trajectory file
+ in the first place.
+
+ Parameters
+ ----------
+ basename : str
+ basename for the output coordinate file(s).
+ universe_woH : MDAnalysis universe instance
+ This is the universe *without* hydrogen.
+ dic_OP : ordered dictionnary
+ Each key of this dict is a couple carbon/H, and at the beginning it
+ contains an empty list, e.g.
+ OrderedDict([ ('C1', 'H11): [], ('C1', 'H12'): [], ... ])
+ See function init_dic_OP() below to see how it is organized.
+ dic_lipid : dictionnary
+ Comes from dic_lipids.py. Contains carbon names and helper names needed
+ for reconstructing hydrogens.
+ dic_Cname2Hnames : dictionnary
+ This dict gives the correspondance Cname -> Hname. It is a dict of
+ tuples. If there is more than 1 H for a given C, they need to be
+ *ordered* like in the PDB. e.g. for CHARMM POPC :
+ {'C13': ('H13A', 'H13B', 'H13C'), ..., 'C33': ('H3X', 'H3Y'),
+ ..., 'C216': ('H16R', 'H16S'), ...}
+ dic_corresp_numres_index_dic_OP : dictionnary
+ This dict should contain the correspondance between the numres and
+ the corresponding index in dic_OP.
+ begin: int
+ index of the first frame of trajectory
+ end: int
+ index of the last frame of trajectory
+ traj_file : bool
+ a trajectory output file has to be generated?
+ """
dic_lipids_with_indexes = make_dic_lipids_with_indexes(universe_woH, dic_lipid,
dic_OP)
# Create filenames.
pdbout_filename = basename + ".pdb"
- xtcout_filename = basename + ".xtc"
# Build a new universe with H.
# Build a pandas df with H.
new_df_atoms = build_system_hydrogens(universe_woH, dic_lipid, dic_Cname2Hnames,
@@ -584,26 +616,33 @@ def gen_XTC_calcOP(basename, universe_woH, dic_OP, dic_lipid,
f.write(writers.pandasdf2pdb(new_df_atoms))
# Then create the universe with H from that pdb.
universe_wH = mda.Universe(pdbout_filename)
- # Create an xtc writer.
- print("Writing trajectory with hydrogens in xtc file.")
- newxtc = XTC.XTCWriter(xtcout_filename, len(universe_wH.atoms))
- # Write 1st frame.
- newxtc.write(universe_wH)
-
- # 4) Loop over all frames of the traj *without* H, build Hs and
- # calc OP (ts is a Timestep instance).
- for ts in universe_woH.trajectory[begin:end]:
- print("Dealing with frame {} at {} ps."
- .format(ts.frame, universe_woH.trajectory.time))
- # Build H and update their positions in the universe *with* H (in place).
- # Calculate OPs on the fly while building Hs (dic_OP changed in place).
- build_all_Hs_calc_OP(universe_woH, ts, dic_lipid, dic_Cname2Hnames,
- universe_wH, dic_OP, dic_corresp_numres_index_dic_OP,
- dic_lipids_with_indexes)
- # Write new frame to xtc.
+
+ #Do we need to generate a trajectory file ?
+ if traj_file:
+ xtcout_filename = basename + ".xtc"
+ # Create an xtc writer.
+ print("Writing trajectory with hydrogens in xtc file.")
+ newxtc = XTC.XTCWriter(xtcout_filename, len(universe_wH.atoms))
+ # Write 1st frame.
newxtc.write(universe_wH)
- # Close xtc.
- newxtc.close()
+
+ # 4) Loop over all frames of the traj *without* H, build Hs and
+ # calc OP (ts is a Timestep instance).
+ for ts in universe_woH.trajectory[begin:end]:
+ print("Dealing with frame {} at {} ps."
+ .format(ts.frame, universe_woH.trajectory.time))
+ # Build H and update their positions in the universe *with* H (in place).
+ # Calculate OPs on the fly while building Hs (dic_OP changed in place).
+ build_all_Hs_calc_OP(universe_woH, ts, dic_lipid, dic_Cname2Hnames,
+ universe_wH, dic_OP, dic_corresp_numres_index_dic_OP,
+ dic_lipids_with_indexes)
+ # Write new frame to xtc.
+ newxtc.write(universe_wH)
+ # Close xtc.
+ newxtc.close()
+ # if not, just compute OP in the fast way.
+ else:
+ fast_build_all_Hs_calc_OP(universe_woH, begin, end, dic_OP, dic_lipid, dic_Cname2Hnames)
def is_allHs_present(def_file, lipids_name, dic_ref_CHnames):
|
patrickfuchs/buildH
|
13e27accddf261c3cf8d75967495476da7ff1b23
|
diff --git a/tests/test_core.py b/tests/test_core.py
index fc622c8..cb66148 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -246,7 +246,7 @@ class TestXTCPOPC:
"""Test class for a trajectory (in xtc format) of POPC lipids."""
# Subset of reference data for dic_OP result
- # This used for test_fast_calcOP() and test_gen_XTC_calcOP()
+ # This used for test_fast_calcOP() and test_gen_coordinates_calcOP()
ref_OP = {
# values = 2 lipids x 11 frames
('C2', 'H23'): [ [-0.47293904, -0.48531776, -0.33300023, -0.08279667, -0.49939686,
@@ -294,7 +294,7 @@ class TestXTCPOPC:
def test_fast_calcOP(self, tmp_path):
"""Test fast_build_all_Hs_calc_OP() on a trajectory.
- The results should be indentical to the test_gen_XTC_calcOP() test.
+ The results should be indentical to the test_gen_coordinates_calcOP() test.
Parameters
----------
@@ -314,8 +314,8 @@ class TestXTCPOPC:
assert key in self.dic_OP.keys()
assert_almost_equal(value, self.dic_OP[key])
- def test_gen_XTC_calcOP(self, tmp_path):
- """Test for gen_XTC_calcOP().
+ def test_gen_coordinates_calcOP(self, tmp_path):
+ """Test for gen_coordinates_calcOP().
The results should be indentical to the test_fast_calcOP() test.
@@ -324,9 +324,9 @@ class TestXTCPOPC:
tmp_path : pathlib.Path (Pytest fixture)
path to a unique temporary directory.
"""
- core.gen_XTC_calcOP("test", self.universe_woH, self.dic_OP, self.dic_lipid,
- self.dic_Cname2Hnames, self.dic_corresp_numres_index_dic_OP,
- self.begin, self.end)
+ core.gen_coordinates_calcOP("test", self.universe_woH, self.dic_OP, self.dic_lipid,
+ self.dic_Cname2Hnames, self.dic_corresp_numres_index_dic_OP,
+ self.begin, self.end, True)
# Check statistics
assert_almost_equal(np.mean(self.dic_OP[('C32', 'H321')]), 0.15300163)
|
Generation of xtc with one frame?
When the user provides a single pdb file without any trajectory (no use of `-x`), and the trajectory with H is requested with option `-opx`, buildH generates a pdb and an xtc. The xtc file contains only one frame, it's thus useless. Maybe we can check if `-x` is not used and `-opx` is used to generate only the pdb?
|
0.0
|
13e27accddf261c3cf8d75967495476da7ff1b23
|
[
"tests/test_core.py::TestXTCPOPC::test_gen_coordinates_calcOP"
] |
[
"tests/test_core.py::TestPDBPOPC::test_buildHs_on_1C[0-Hs_coords0]",
"tests/test_core.py::TestPDBPOPC::test_buildHs_on_1C[4-Hs_coords1]",
"tests/test_core.py::TestPDBPOPC::test_buildHs_on_1C[12-Hs_coords2]",
"tests/test_core.py::TestPDBPOPC::test_buildHs_on_1C[23-Hs_coords3]",
"tests/test_core.py::TestPDBPOPC::test_get_indexes[0-helpers_indexes0]",
"tests/test_core.py::TestPDBPOPC::test_get_indexes[12-helpers_indexes1]",
"tests/test_core.py::TestPDBPOPC::test_get_indexes[23-helpers_indexes2]",
"tests/test_core.py::TestPDBPOPC::test_get_indexes[49-helpers_indexes3]",
"tests/test_core.py::TestPDBPOPC::test_make_dic_lipids_with_indexes",
"tests/test_core.py::TestPDBPOPC::test_fast_build_all_Hs_calc_OP",
"tests/test_core.py::TestPDBPOPC::test_reconstruct_Hs_first_frame",
"tests/test_core.py::TestPDBPOPC::test_reconstruct_Hs",
"tests/test_core.py::TestXTCPOPC::test_fast_calcOP",
"tests/test_core.py::TestXTCPOPC::test_check_def_file"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-11 09:59:36+00:00
|
bsd-3-clause
| 4,462 |
|
patrickfuchs__buildH-80
|
diff --git a/buildh/lipids.py b/buildh/lipids.py
index 7bd1413..3a3a632 100644
--- a/buildh/lipids.py
+++ b/buildh/lipids.py
@@ -46,10 +46,10 @@ def read_lipids_topH(filenames):
try:
topol = json.load(json_file)
except Exception as e:
- raise ValueError(f"{filenam_path} is in a bad format.") from e
+ raise ValueError(f"{filenam_path} is in a bad format: " + str(e)) from e
# make sure at least 'resname' key exists
if "resname" not in topol:
- raise ValueError(f"{filenam_path} is in a bad format.")
+ raise ValueError(f"{filenam_path} is in a bad format: keyword 'resname' is missing.")
# Retrieve forcefield and lipid name from the filename
try:
|
patrickfuchs/buildH
|
41d734ccd6b8a8c04c3520f88ac11597174fe8c1
|
diff --git a/tests/test_lipids.py b/tests/test_lipids.py
index 3fa0526..cfaa76a 100644
--- a/tests/test_lipids.py
+++ b/tests/test_lipids.py
@@ -54,4 +54,4 @@ class TestLipids():
bad_file = "Berger_wrongformat.json"
with pytest.raises(ValueError) as err:
lipids.read_lipids_topH([PATH_ROOT_DATA / bad_file])
- assert f"{bad_file} is in a bad format." in str(err.value)
+ assert f"{bad_file} is in a bad format: keyword 'resname' is missing." in str(err.value)
|
Be clearer when there is an error with the user supplied json file
Making a try with a toy (user supplied) json file:
```
$ buildH -c 3Conly.pdb -d 3Conly.def -lt Berger_3Conly.json -l PO3C
usage: buildH [-h] -c COORD [-t TRAJ] -l LIPID [-lt LIPID_TOPOLOGY [LIPID_TOPOLOGY ...]] -d DEFOP [-opx OPDBXTC] [-o OUT]
[-b BEGIN] [-e END] [-pi PICKLE]
buildH: error: Berger_3Conly.json is in a bad format.
```
This error message is not very precise. The user can't find where the error comes from in the json file.
When doing this by hand:
```
>>> f = open("Berger_3Conly.json")
>>> json.load(f)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/fuchs/software/miniconda3/envs/buildh/lib/python3.9/json/__init__.py", line 293, in load
return loads(fp.read(),
File "/home/fuchs/software/miniconda3/envs/buildh/lib/python3.9/json/__init__.py", line 346, in loads
return _default_decoder.decode(s)
File "/home/fuchs/software/miniconda3/envs/buildh/lib/python3.9/json/decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/home/fuchs/software/miniconda3/envs/buildh/lib/python3.9/json/decoder.py", line 353, in raw_decode
obj, end = self.scan_once(s, idx)
json.decoder.JSONDecodeError: Expecting property name enclosed in double quotes: line 4 column 1 (char 61)
```
Here we can see that it comes from line 4 column 1. Easier to debug.
In fact the error was coming from a comma at the end of the last line :
```
{
"resname": ["PO3C"],
"C26": ["CH2", "C25", "C27"],
}
```
It makes me think to warn the user about this: don't put a comma on the last line!
|
0.0
|
41d734ccd6b8a8c04c3520f88ac11597174fe8c1
|
[
"tests/test_lipids.py::TestLipids::test_check_read_lipids_topH_failure"
] |
[
"tests/test_lipids.py::TestLipids::test_check_read_lipids_topH_success"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-04-29 07:33:00+00:00
|
bsd-3-clause
| 4,463 |
|
paulocheque__django-dynamic-fixture-161
|
diff --git a/django_dynamic_fixture/ddf.py b/django_dynamic_fixture/ddf.py
index 1e7f345..dd2d038 100644
--- a/django_dynamic_fixture/ddf.py
+++ b/django_dynamic_fixture/ddf.py
@@ -239,6 +239,7 @@ class DDFLibrary:
import warnings
if name in [None, True]:
name = self.DEFAULT_KEY
+ model_class = self._get_concrete_model(model_class)
if model_class in self.configs and name in self.configs[model_class]:
if not os.getenv('DDF_SHELL_MODE'):
msg = "Override a lesson is an anti-pattern and will turn your test suite very hard to understand."
@@ -250,12 +251,19 @@ class DDFLibrary:
def get_configuration(self, model_class, name=None):
if name is None:
name = self.DEFAULT_KEY
+ model_class = self._get_concrete_model(model_class)
# copy is important because this dict will be updated every time in the algorithm.
config = self.configs.get(model_class, {})
if name != self.DEFAULT_KEY and name not in config.keys():
raise InvalidConfigurationError('There is no lesson for model {} with the name "{}"'.format(get_unique_model_name(model_class), name))
return config.get(name, {}).copy() # default configuration never raises an error
+ def _get_concrete_model(self, model_class):
+ if hasattr(model_class, '_meta') and model_class._meta.proxy:
+ return model_class._meta.concrete_model or model_class
+ else:
+ return model_class
+
def clear(self):
'''Remove all lessons of the library. Util for the DDF tests.'''
self.configs = {}
|
paulocheque/django-dynamic-fixture
|
4ae79700d48c1fb307a7437dc256dd099a080a4a
|
diff --git a/django_dynamic_fixture/models_test.py b/django_dynamic_fixture/models_test.py
index 17d1dde..b36b618 100644
--- a/django_dynamic_fixture/models_test.py
+++ b/django_dynamic_fixture/models_test.py
@@ -352,6 +352,13 @@ class ModelForLibrary(models.Model):
app_label = 'django_dynamic_fixture'
+class ProxyModelForLibrary(ModelForLibrary):
+ class Meta:
+ proxy = True
+ verbose_name = 'Proxy Library'
+ app_label = 'django_dynamic_fixture'
+
+
class ModelWithUniqueCharField(models.Model):
text_unique = models.CharField(max_length=20, unique=True)
diff --git a/django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py b/django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py
index d15639b..95d7961 100644
--- a/django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py
+++ b/django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py
@@ -65,6 +65,24 @@ class TeachAndLessonsTest(DDFTestCase):
assert instance.integer == 1000
assert instance.foreignkey.integer == 1001
+ def test_it_uses_lessons_for_base_model_when_creating_a_proxy_model(self):
+ self.ddf.teach(ModelForLibrary, integer=123)
+ instance = self.ddf.get(ProxyModelForLibrary)
+ assert instance.__class__ is ProxyModelForLibrary
+ assert instance.integer == 123
+
+ def test_it_uses_lessons_for_proxy_models_when_creating_the_base_model(self):
+ self.ddf.teach(ProxyModelForLibrary, integer=456)
+ instance = self.ddf.get(ModelForLibrary)
+ assert instance.__class__ is ModelForLibrary
+ assert instance.integer == 456
+
+ def test_it_uses_lessons_for_proxy_models_when_creating_the_proxy_model(self):
+ self.ddf.teach(ProxyModelForLibrary, integer=789)
+ instance = self.ddf.get(ProxyModelForLibrary)
+ assert instance.__class__ is ProxyModelForLibrary
+ assert instance.integer == 789
+
# Not implemented yet
# def test_teaching_must_store_ddf_configs_too(self):
# self.ddf.teach(ModelForLibrary, fill_nullable_fields=False)
|
Proxy models ignore lessons of their base models
In the [Django docs](https://docs.djangoproject.com/en/4.2/topics/db/models/#proxy-models) it sounds like proxy models and regular models are basically equivalent when it comes to database access. The proxy models just add extra methods or managers that the regular models don't have.
Based on this I was also expecting that DDF reuses the lessons for concrete models for any proxy models they might have. But this doesn't seem to be the case 😞
`G()`etting a proxy model works just fine, but DDF ignores any lessons that I have taught for the concrete base model.
```python
### models.py
from django.db import models
class Thing(models.Model):
value = models.IntegerField()
class ProxyThing(Thing):
class Meta:
proxy = True
### tests.py
from django.test import TestCase
from django_dynamic_fixture import G, teach
from demo.models import ProxyThing, Thing
teach(Thing, value=1234)
class ProxyThingTest(TestCase):
def test_stuff(self):
self.assertEqual(1234, G(ProxyThing).value)
```
When I run the tests using `python manage.py test`, the test fails:
```
Found 1 test(s).
Creating test database for alias 'default'...
System check identified no issues (0 silenced).
F
======================================================================
FAIL: test_stuff (demo.tests.ThingTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\demo\proxy-lessons\tests.py", line 13, in test_stuff
self.assertEqual(1234, proxy.value)
AssertionError: 1234 != 1
----------------------------------------------------------------------
Ran 1 test in 0.002s
FAILED (failures=1)
Destroying test database for alias 'default'...
```
I currently work aroung this by specifically copying the lessons for proxy models from their concrete base models. But it would be nicer if DDF supported this out of the box 😸. Maybe if `DDFLibrary.get_configuration()` and `DDFLibrary.add_configuration()` checked for proxy models themselves?
```python
from django.apps import apps
from django_dynamic_fixture import DDFLibrary
def _copy_lessons_for_proxy_models():
library = DDFLibrary.get_instance()
for model in apps.get_models():
concrete_model = model._meta.concrete_model
if not model._meta.proxy or not concrete_model or concrete_model == model:
continue
config = library.get_configuration(concrete_model)
if config:
library.add_configuration(model, config)
```
|
0.0
|
4ae79700d48c1fb307a7437dc256dd099a080a4a
|
[
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_uses_lessons_for_base_model_when_creating_a_proxy_model",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_uses_lessons_for_proxy_models_when_creating_the_base_model"
] |
[
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::DDFLibraryTest::test_clear",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::DDFLibraryTest::test_add_and_get_configuration_with_name",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::DDFLibraryTest::test_add_and_get_configuration_without_string_name",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::DDFLibraryTest::test_clear_config",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachingAndCustomLessonsTest::test_default_lesson_and_custom_lesson_must_work_together",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachingAndCustomLessonsTest::test_it_must_raise_an_error_if_user_try_to_use_a_not_saved_configuration",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachingAndCustomLessonsTest::test_a_model_can_have_custom_lessons",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachingAndCustomLessonsTest::test_default_lesson_and_custom_lesson_must_work_together_for_different_models",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachingAndCustomLessonsTest::test_custom_lessons_must_not_be_used_if_not_explicity_specified",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachingAndCustomLessonsTest::test_a_model_can_have_many_custom_lessons",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_must_accept_dynamic_values_for_fields_with_unicity",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_allows_to_use_masks_as_lessons_for_unique_integer_fields",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_must_use_lessons_for_internal_dependencies",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_default_lesson_may_be_overrided_although_it_is_an_anti_pattern",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_must_raise_an_error_if_try_to_set_a_static_value_to_a_field_with_unicity",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_must_NOT_propagate_lessons_for_internal_dependencies",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_uses_lessons_for_proxy_models_when_creating_the_proxy_model",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_teach_a_default_lesson_for_a_model",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_must_NOT_raise_an_error_if_user_try_to_use_a_not_saved_default_configuration",
"django_dynamic_fixture/tests/test_ddf_teaching_and_lessons.py::TeachAndLessonsTest::test_it_allows_to_use_masks_as_lessons_for_unique_char_fields"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-15 21:06:39+00:00
|
mit
| 4,464 |
|
pavdmyt__yaspin-35
|
diff --git a/HISTORY.rst b/HISTORY.rst
index e42a4e8..f811286 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,6 +1,12 @@
Release History
===============
+0.14.3 / 2019-05-12
+-------------------
+
+* fix(#29): race condition between spinner thread and ``write()``
+
+
0.14.2 / 2019-04-27
-------------------
diff --git a/setup.cfg b/setup.cfg
index 414ae9c..710350c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 0.14.2
+current_version = 0.14.3
[metadata]
description-file = README.rst
diff --git a/yaspin/__version__.py b/yaspin/__version__.py
index 745162e..23f0070 100644
--- a/yaspin/__version__.py
+++ b/yaspin/__version__.py
@@ -1,1 +1,1 @@
-__version__ = "0.14.2"
+__version__ = "0.14.3"
diff --git a/yaspin/core.py b/yaspin/core.py
index f61bdc0..803daf7 100644
--- a/yaspin/core.py
+++ b/yaspin/core.py
@@ -76,6 +76,7 @@ class Yaspin(object):
self._hide_spin = None
self._spin_thread = None
self._last_frame = None
+ self._stdout_lock = threading.Lock()
# Signals
@@ -247,43 +248,47 @@ class Yaspin(object):
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and not self._hide_spin.is_set():
- # set the hidden spinner flag
- self._hide_spin.set()
+ with self._stdout_lock:
+ # set the hidden spinner flag
+ self._hide_spin.set()
- # clear the current line
- sys.stdout.write("\r")
- self._clear_line()
+ # clear the current line
+ sys.stdout.write("\r")
+ self._clear_line()
- # flush the stdout buffer so the current line can be rewritten to
- sys.stdout.flush()
+ # flush the stdout buffer so the current line
+ # can be rewritten to
+ sys.stdout.flush()
def show(self):
"""Show the hidden spinner."""
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and self._hide_spin.is_set():
- # clear the hidden spinner flag
- self._hide_spin.clear()
+ with self._stdout_lock:
+ # clear the hidden spinner flag
+ self._hide_spin.clear()
- # clear the current line so the spinner is not appended to it
- sys.stdout.write("\r")
- self._clear_line()
+ # clear the current line so the spinner is not appended to it
+ sys.stdout.write("\r")
+ self._clear_line()
def write(self, text):
"""Write text in the terminal without breaking the spinner."""
# similar to tqdm.write()
# https://pypi.python.org/pypi/tqdm#writing-messages
- sys.stdout.write("\r")
- self._clear_line()
+ with self._stdout_lock:
+ sys.stdout.write("\r")
+ self._clear_line()
- _text = to_unicode(text)
- if PY2:
- _text = _text.encode(ENCODING)
+ _text = to_unicode(text)
+ if PY2:
+ _text = _text.encode(ENCODING)
- # Ensure output is bytes for Py2 and Unicode for Py3
- assert isinstance(_text, builtin_str)
+ # Ensure output is bytes for Py2 and Unicode for Py3
+ assert isinstance(_text, builtin_str)
- sys.stdout.write("{0}\n".format(_text))
+ sys.stdout.write("{0}\n".format(_text))
def ok(self, text="OK"):
"""Set Ok (success) finalizer to a spinner."""
@@ -306,7 +311,8 @@ class Yaspin(object):
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
- sys.stdout.write(self._last_frame)
+ with self._stdout_lock:
+ sys.stdout.write(self._last_frame)
def _spin(self):
while not self._stop_spin.is_set():
@@ -321,9 +327,10 @@ class Yaspin(object):
out = self._compose_out(spin_phase)
# Write
- sys.stdout.write(out)
- self._clear_line()
- sys.stdout.flush()
+ with self._stdout_lock:
+ sys.stdout.write(out)
+ self._clear_line()
+ sys.stdout.flush()
# Wait
time.sleep(self._interval)
|
pavdmyt/yaspin
|
dd2fd2187fbdef81aede3ea6d2053c1ccd2d8034
|
diff --git a/tests/test_in_out.py b/tests/test_in_out.py
index 180b642..0fe6e49 100644
--- a/tests/test_in_out.py
+++ b/tests/test_in_out.py
@@ -8,7 +8,9 @@ Checks that all input data is converted to unicode.
And all output data is converted to builtin str type.
"""
+import re
import sys
+import time
import pytest
@@ -159,3 +161,22 @@ def test_hide_show(capsys, text, request):
# ensure that text was cleared before resuming the spinner
assert out[:4] == "\r\033[K"
+
+
+def test_spinner_write_race_condition(capsys):
+ # test that spinner text does not overwrite write() contents
+ # this generally happens when the spinner thread writes
+ # between write()'s \r and the text it actually wants to write
+
+ sp = yaspin(text="aaaa")
+ sp.start()
+ sp._interval = 0.0
+ start_time = time.time()
+ while time.time() - start_time < 3.0:
+ sp.write("bbbb")
+ sp.stop()
+
+ out, _ = capsys.readouterr()
+ assert "aaaa" in out # spinner text is present
+ assert "bbbb" in out # write() text is present
+ assert not re.search(r"aaaa[^\rb]*bbbb", out)
|
Race condition between spinner thread and write()
When I use `write()` a lot on a spinner it often happens to me that the written text is displayed after the spinner message, like this:
```
[...]
noot noot
⠋ spinning...noot noot
noot noot
[...]
```
I used the script below to recreate the problem. It takes some time (400-800 writes) with it, but happens to me far more often with real world applications.
```python
import random
import time
import yaspin
with yaspin.yaspin() as sp:
sp.text = 'spinning...'
while True:
sp.write('noot noot')
time.sleep(0.051 + random.randint(0, 10) / 100)
```
I think this happens because of concurrent access to `sys.stdout.write()` by the spinning thread and the spinners `write()`. This should be solvable by using a `threading.Lock()` and guarding access to sys.stdout.write() with it. In a quick local hack-fix-test it worked for me by doing this in `write()` and `_spin()`, but there are a lot more functions that would need guarding, if done right.
|
0.0
|
dd2fd2187fbdef81aede3ea6d2053c1ccd2d8034
|
[
"tests/test_in_out.py::test_spinner_write_race_condition"
] |
[
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'empty",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'empty",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'ascii",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'ascii",
"tests/test_in_out.py::test_input_converted_to_unicode['ascii",
"tests/test_in_out.py::test_out_converted_to_builtin_str['ascii",
"tests/test_in_out.py::test_repr['ascii",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'non-ascii",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'non-ascii",
"tests/test_in_out.py::test_input_converted_to_unicode['non-ascii",
"tests/test_in_out.py::test_out_converted_to_builtin_str['non-ascii",
"tests/test_in_out.py::test_repr['non-ascii",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'List[]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[bytes]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[bytes]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[bytes]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[bytes]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[bytes]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[bytes]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[bytes]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[bytes]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'List[bytes]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[unicode]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[unicode]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[unicode]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[unicode]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[unicode]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[unicode]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[unicode]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[unicode]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'List[unicode]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[str]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[str]",
"tests/test_in_out.py::test_repr['empty'-'List[str]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'Tuple[]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[bytes]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[bytes]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[bytes]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[bytes]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[bytes]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[bytes]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[bytes]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[bytes]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'Tuple[bytes]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[unicode]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[unicode]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[unicode]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[unicode]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[unicode]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[unicode]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[unicode]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[unicode]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'Tuple[unicode]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[str]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[str]",
"tests/test_in_out.py::test_repr['empty'-'Tuple[str]",
"tests/test_in_out.py::test_write['non-ascii",
"tests/test_in_out.py::test_hide_show['non-ascii",
"tests/test_in_out.py::test_repr['empty'-'non-ascii",
"tests/test_in_out.py::test_write['ascii",
"tests/test_in_out.py::test_hide_show['ascii",
"tests/test_in_out.py::test_repr['empty'-'ascii",
"tests/test_in_out.py::test_repr['empty'-'empty",
"tests/test_in_out.py::test_write['empty']",
"tests/test_in_out.py::test_hide_show['empty']",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-bold]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-18 15:28:04+00:00
|
mit
| 4,465 |
|
paver__paver-174
|
diff --git a/docs/source/pavement.rst b/docs/source/pavement.rst
index cf09246..9dae827 100644
--- a/docs/source/pavement.rst
+++ b/docs/source/pavement.rst
@@ -203,6 +203,22 @@ For sharing, following must be fullfilled:
Otherwise, ``PavementError`` is raised.
+You can combine both ``@consume_args`` and ``@cmdopts`` together::
+
+ @task
+ @cmdopts([
+ ('username=', 'u', 'Username to use when logging in to the servers')
+ ])
+ @consume_args
+ def exec(options):
+ pass
+
+
+* ``paver exec -u root`` will result in ``options.username = 'root', options.args = []``
+* ``paver exec -u root production`` will result in ``options.username = 'root', options.args = ['production']``
+* ``paver exec production -u root`` will result in ``options.args = ['production', '-u', 'root']`` with no ``options.username`` attribute.
+* ``paver exec -u root production -u other`` will result in ``options.username = 'root', options.args = ['production', '-u', 'other']``
+
Hiding tasks
---------------
diff --git a/paver/tasks.py b/paver/tasks.py
index 7300969..09f7d4e 100644
--- a/paver/tasks.py
+++ b/paver/tasks.py
@@ -741,7 +741,8 @@ def _parse_command_line(args):
if not isinstance(task, Task):
raise BuildFailure("%s is not a Task" % taskname)
- if task.consume_args != float('inf'):
+ if task.user_options or task.consume_args != float('inf'):
+ # backwards compatibility around mixing of @cmdopts & @consume_args
args = task.parse_args(args)
if task.consume_args > 0:
args = _consume_nargs(task, args)
|
paver/paver
|
45609b2ad1901144e68746b903345d7de1d03404
|
diff --git a/paver/tests/test_tasks.py b/paver/tests/test_tasks.py
index 7b971f2..1df7c4e 100644
--- a/paver/tests/test_tasks.py
+++ b/paver/tests/test_tasks.py
@@ -404,6 +404,74 @@ def test_consume_args():
tasks._process_commands("t3 -v 1".split())
assert t3.called
+def test_consume_args_and_options():
+ @tasks.task
+ @tasks.cmdopts([
+ ("foo=", "f", "Help for foo")
+ ])
+ @tasks.consume_args
+ def t1(options):
+ assert options.foo == "1"
+ assert options.t1.foo == "1"
+ assert options.args == ['abc', 'def']
+
+ environment = _set_environment(t1=t1)
+ tasks._process_commands([
+ 't1', '--foo', '1', 'abc', 'def',
+ ])
+ assert t1.called
+
+def test_consume_args_and_options_2():
+ @tasks.task
+ @tasks.cmdopts([
+ ("foo=", "f", "Help for foo")
+ ])
+ @tasks.consume_args
+ def t1(options):
+ assert not hasattr(options, 'foo')
+ assert not hasattr(options.t1, 'foo')
+ assert options.args == ['abc', 'def', '--foo', '1']
+
+ environment = _set_environment(t1=t1)
+ tasks._process_commands([
+ 't1', 'abc', 'def', '--foo', '1',
+ ])
+ assert t1.called
+
+def test_consume_args_and_options_3():
+ @tasks.task
+ @tasks.cmdopts([
+ ("foo=", "f", "Help for foo")
+ ])
+ @tasks.consume_args
+ def t1(options):
+ assert options.foo == "1"
+ assert options.t1.foo == "1"
+ assert options.args == []
+
+ environment = _set_environment(t1=t1)
+ tasks._process_commands([
+ 't1', '--foo', '1',
+ ])
+ assert t1.called
+
+def test_consume_args_and_options_conflict():
+ @tasks.task
+ @tasks.cmdopts([
+ ("foo=", "f", "Help for foo")
+ ])
+ @tasks.consume_args
+ def t1(options):
+ assert options.foo == "1"
+ assert options.t1.foo == "1"
+ assert options.args == ['abc', 'def', '--foo', '2']
+
+ environment = _set_environment(t1=t1)
+ tasks._process_commands([
+ 't1', '--foo', '1', 'abc', 'def', '--foo', '2',
+ ])
+ assert t1.called
+
def test_consume_nargs():
# consume all args on first task
@tasks.task
|
Define behavior with both @cmdopts and @consume_args set
Latter now overwrites former, which is not what one generally wants.
Parse for cmdopts and remove it from args.
|
0.0
|
45609b2ad1901144e68746b903345d7de1d03404
|
[
"paver/tests/test_tasks.py::test_consume_args_and_options",
"paver/tests/test_tasks.py::test_consume_args_and_options_2",
"paver/tests/test_tasks.py::test_consume_args_and_options_3",
"paver/tests/test_tasks.py::test_consume_args_and_options_conflict"
] |
[
"paver/tests/test_tasks.py::test_basic_dependencies",
"paver/tests/test_tasks.py::test_longname_resolution_in_dependencies",
"paver/tests/test_tasks.py::test_chained_dependencies",
"paver/tests/test_tasks.py::test_backwards_compatible_needs",
"paver/tests/test_tasks.py::test_tasks_dont_repeat",
"paver/tests/test_tasks.py::test_basic_command_line",
"paver/tests/test_tasks.py::test_list_tasks",
"paver/tests/test_tasks.py::test_environment_insertion",
"paver/tests/test_tasks.py::test_add_options_to_environment",
"paver/tests/test_tasks.py::test_shortname_access",
"paver/tests/test_tasks.py::test_longname_access",
"paver/tests/test_tasks.py::test_task_command_line_options",
"paver/tests/test_tasks.py::test_setting_of_options_with_equals",
"paver/tests/test_tasks.py::test_options_inherited_via_needs",
"paver/tests/test_tasks.py::test_options_inherited_via_needs_even_from_grandparents",
"paver/tests/test_tasks.py::test_options_shouldnt_overlap",
"paver/tests/test_tasks.py::test_options_shouldnt_overlap_when_bad_task_specified",
"paver/tests/test_tasks.py::test_options_may_overlap_if_explicitly_allowed",
"paver/tests/test_tasks.py::test_exactly_same_parameters_must_be_specified_in_order_to_allow_sharing",
"paver/tests/test_tasks.py::test_dest_parameter_should_map_opt_to_property",
"paver/tests/test_tasks.py::test_dotted_options",
"paver/tests/test_tasks.py::test_dry_run",
"paver/tests/test_tasks.py::test_consume_args",
"paver/tests/test_tasks.py::test_consume_nargs",
"paver/tests/test_tasks.py::test_consume_nargs_and_options",
"paver/tests/test_tasks.py::test_optional_args_in_tasks",
"paver/tests/test_tasks.py::test_debug_logging",
"paver/tests/test_tasks.py::test_base_logging",
"paver/tests/test_tasks.py::test_error_show_up_no_matter_what",
"paver/tests/test_tasks.py::test_all_messages_for_a_task_are_captured",
"paver/tests/test_tasks.py::test_messages_with_formatting_and_no_args_still_work",
"paver/tests/test_tasks.py::test_alternate_pavement_option",
"paver/tests/test_tasks.py::test_captured_output_shows_up_on_exception",
"paver/tests/test_tasks.py::test_calling_subpavement",
"paver/tests/test_tasks.py::test_task_finders",
"paver/tests/test_tasks.py::test_calling_a_function_rather_than_task",
"paver/tests/test_tasks.py::test_depending_on_a_function_rather_than_task",
"paver/tests/test_tasks.py::test_description_retrieval_trial",
"paver/tests/test_tasks.py::test_description_empty_without_docstring",
"paver/tests/test_tasks.py::test_description_retrieval_first_sentence",
"paver/tests/test_tasks.py::test_description_retrieval_first_sentence_even_with_version_numbers",
"paver/tests/test_tasks.py::test_auto_task_is_not_run_with_noauto",
"paver/tests/test_tasks.py::test_auto_task_is_run_when_present",
"paver/tests/test_tasks.py::test_task_can_be_called_repeatedly",
"paver/tests/test_tasks.py::test_options_passed_to_task",
"paver/tests/test_tasks.py::test_calling_task_with_option_arguments",
"paver/tests/test_tasks.py::test_calling_task_with_arguments_do_not_overwrite_it_for_other_tasks",
"paver/tests/test_tasks.py::test_options_might_be_provided_if_task_might_be_called",
"paver/tests/test_tasks.py::test_calling_task_with_arguments",
"paver/tests/test_tasks.py::test_calling_task_with_empty_arguments",
"paver/tests/test_tasks.py::test_calling_nonconsuming_task_with_arguments",
"paver/tests/test_tasks.py::test_options_may_overlap_between_multiple_tasks_even_when_specified_in_reverse_order",
"paver/tests/test_tasks.py::test_options_might_be_shared_both_way",
"paver/tests/test_tasks.py::test_paver_doesnt_crash_on_task_function_with_annotations"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-08-23 14:20:07+00:00
|
bsd-3-clause
| 4,466 |
|
pawelzny__dotty_dict-44
|
diff --git a/dotty_dict/dotty_dict.py b/dotty_dict/dotty_dict.py
index 96d47cf..b806aa9 100644
--- a/dotty_dict/dotty_dict.py
+++ b/dotty_dict/dotty_dict.py
@@ -127,7 +127,10 @@ class Dotty:
except ValueError:
raise KeyError("List index must be an integer, got {}".format(it))
if idx < len(data):
- return get_from(items, data[idx])
+ if len(items) > 0:
+ return get_from(items, data[idx])
+ else:
+ return data[idx]
else:
raise IndexError("List index out of range")
# /end Handle embedded lists
|
pawelzny/dotty_dict
|
96d795ba81eb2d785c18c971ae53a195716c084a
|
diff --git a/tests/test_list_in_dotty.py b/tests/test_list_in_dotty.py
index a3feb5c..583ac25 100644
--- a/tests/test_list_in_dotty.py
+++ b/tests/test_list_in_dotty.py
@@ -25,9 +25,13 @@ class TestListInDotty(unittest.TestCase):
{
'subfield1': [{'subsubfield': 'Value of sub subfield (item 0)'}]
}
- ]
+ ],
+ 'field6': ['a', 'b']
})
+ def test_root_level_list_element(self):
+ self.assertEqual(self.dot['field6.0'], 'a')
+
def test_access_subfield1_of_field3(self):
self.assertEqual(self.dot['field3.0.subfield1'], 'Value of subfield1 (item 0)')
self.assertEqual(self.dot['field3.1.subfield1'], 'Value of subfield1 (item 1)')
|
Accessing root level list items by index fails
Thanks for the library! Using latest version (v1.1.1), this test fails
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from dotty_dict import dotty_l as dotty
class TestListInDotty(unittest.TestCase):
def setUp(self):
self.dot = dotty({
'field1': 'Value of F1',
'field2': 'Value of F2',
'field3': [
{
'subfield1': 'Value of subfield1 (item 0)',
'subfield2': 'Value of subfield2 (item 0)'
},
{
'subfield1': 'Value of subfield1 (item 1)',
'subfield2': 'Value of subfield2 (item 1)'
},
],
'field4': 'Not wanted',
'field5': [
{
'subfield1': [{'subsubfield': 'Value of sub subfield (item 0)'}]
}
],
'field6': ['a', 'b']
})
def test_root_level_list_element(self):
self.assertEqual(self.dot['field6.0'], 'a')
```
This fails on `dotty_dict.py", line 122, in get_from` since no subfield is specified
```
it = items.pop(0)
IndexError: pop from empty list
```
|
0.0
|
96d795ba81eb2d785c18c971ae53a195716c084a
|
[
"tests/test_list_in_dotty.py::TestListInDotty::test_root_level_list_element"
] |
[
"tests/test_list_in_dotty.py::TestListInDotty::test_access_multidimensional_lists",
"tests/test_list_in_dotty.py::TestListInDotty::test_access_sub_sub_field",
"tests/test_list_in_dotty.py::TestListInDotty::test_access_subfield1_of_field3",
"tests/test_list_in_dotty.py::TestListInDotty::test_assert_index_error_if_index_is_out_of_range",
"tests/test_list_in_dotty.py::TestListInDotty::test_assert_key_error_if_index_is_not_integer",
"tests/test_list_in_dotty.py::TestListInDotty::test_delete_subfield",
"tests/test_list_in_dotty.py::TestListInDotty::test_dotty_contains_subfield_of_field",
"tests/test_list_in_dotty.py::TestListInDotty::test_dotty_not_contains_out_of_range_subfield",
"tests/test_list_in_dotty.py::TestListInDotty::test_set_subfield_in_list",
"tests/test_list_in_dotty.py::TestListInDotty::test_update_subfield_in_list"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-07-17 08:56:03+00:00
|
mit
| 4,467 |
|
pberkes__big_O-44
|
diff --git a/big_o/complexities.py b/big_o/complexities.py
index a75e4ab..7c58e5d 100755
--- a/big_o/complexities.py
+++ b/big_o/complexities.py
@@ -48,14 +48,21 @@ class ComplexityClass(object):
tot = 0
for i in range(len(self.coeff)):
tot += self.coeff[i] * x[:, i]
- return tot
+ return self._inverse_transform_time(tot)
+
+ def coefficients(self):
+ """ Return coefficients in standard form. """
+ if self.coeff is None:
+ raise NotFittedError()
+ return self.coeff
def __str__(self):
prefix = '{}: '.format(self.__class__.__name__)
if self.coeff is None:
- return prefix + ': not yet fitted'
- return prefix + self.format_str().format(*tuple(self.coeff)) + ' (sec)'
+ return prefix + 'not yet fitted'
+ return prefix + self.format_str().format(
+ *self.coefficients()) + ' (sec)'
# --- abstract methods
@@ -80,6 +87,12 @@ class ComplexityClass(object):
"""
return t
+ def _inverse_transform_time(self, t):
+ """ Inverse transform time as needed for compute.
+ (e.g., t->exp(t)) for exponential class.
+ """
+ return t
+
def __gt__(self, other):
return self.order > other.order
@@ -176,10 +189,26 @@ class Polynomial(ComplexityClass):
def _transform_time(self, t):
return np.log(t)
+ def _inverse_transform_time(self, t):
+ return np.exp(t)
+
@classmethod
def format_str(cls):
return 'time = {:.2G} * x^{:.2G}'
+ def coefficients(self):
+ """ Return coefficients in standard form. """
+ # The polynomial is stored in the format
+ # exp(a)*n^b where [a, b] are the coefficients
+ # Technical full format is exp(a+b*ln(n))
+ #
+ # Standard form is a*n^b
+ if self.coeff is None:
+ raise NotFittedError()
+
+ a, b = self.coeff
+ return np.exp(a), b
+
class Exponential(ComplexityClass):
order = 80
@@ -190,10 +219,26 @@ class Exponential(ComplexityClass):
def _transform_time(self, t):
return np.log(t)
+ def _inverse_transform_time(self, t):
+ return np.exp(t)
+
@classmethod
def format_str(cls):
return 'time = {:.2G} * {:.2G}^n'
+ def coefficients(self):
+ """ Return coefficients in standard form. """
+ # The polynomial is stored in the format
+ # exp(a)*exp(b)^n where [a, b] are the coefficients
+ # Technical full format is exp(a+b*n)
+ #
+ # Standard form is a*b^n
+ if self.coeff is None:
+ raise NotFittedError()
+
+ a, b = self.coeff
+ return np.exp(a), np.exp(b)
+
ALL_CLASSES = [Constant, Linear, Quadratic, Cubic, Polynomial,
Logarithmic, Linearithmic, Exponential]
|
pberkes/big_O
|
82b8bb318f93a8b7664d989ec069973291929823
|
diff --git a/big_o/test/test_complexities.py b/big_o/test/test_complexities.py
index 4de6035..2e68399 100644
--- a/big_o/test/test_complexities.py
+++ b/big_o/test/test_complexities.py
@@ -1,6 +1,6 @@
import unittest
import numpy as np
-from numpy.testing import assert_array_almost_equal
+from numpy.testing import assert_allclose
from big_o import complexities
@@ -8,15 +8,28 @@ from big_o import complexities
class TestComplexities(unittest.TestCase):
def test_compute(self):
+ desired = [
+ (lambda x: 2.+x*0., complexities.Constant),
+ (lambda x: 5.*x+3., complexities.Linear),
+ (lambda x: 8.1*x**2.+0.9, complexities.Quadratic),
+ (lambda x: 1.0*x**3+11.0, complexities.Cubic),
+ (lambda x: 5.2*x**2.5, complexities.Polynomial),
+ (lambda x: 8.5*np.log(x)+99.0, complexities.Logarithmic),
+ (lambda x: 1.7*x*np.log(x)+2.74, complexities.Linearithmic),
+ (lambda x: 3.14**x, complexities.Exponential)
+ ]
+
x = np.linspace(10, 100, 100)
- y = 3.0 * x + 2.0
- linear = complexities.Linear()
- linear.fit(x, y)
- assert_array_almost_equal(linear.compute(x), y, 10)
+ for f, class_ in desired:
+ y = f(x)
+ complexity = class_()
+ complexity.fit(x, y)
+ assert_allclose(y, complexity.compute(x),
+ err_msg = "compute() failed to match expected values for class %r" % class_)
def test_not_fitted(self):
- linear = complexities.Linear()
- self.assertRaises(complexities.NotFittedError, linear.compute, 100)
+ for class_ in complexities.ALL_CLASSES:
+ self.assertRaises(complexities.NotFittedError, class_().compute, 100)
def test_str_includes_units(self):
x = np.linspace(10, 100, 100)
|
Polynomial.compute() and Exponential.compute() Return Incorrect Results
## Description
`big_o.complexities.Polynomial.compute()` and `big_o.complexities.Exponential.compute()` consistently return unreasonable values.
This issue is present both when the class is used by itself, and when it is returned by `big_o.big_o()`
## Steps to reproduce:
Run the following example code to test the `Polynomial` class:
```python
import big_o
import numpy as np
polynomial_complexity = big_o.complexities.Polynomial()
n = np.array(range(1,11)) # array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
t = 3.7*n**4.5
# array([3.70000000e+00, 8.37214429e+01, 5.19095627e+02, 1.89440000e+03,
# 5.17090720e+03, 1.17457932e+04, 2.35040609e+04, 4.28653788e+04,
# 7.28271000e+04, 1.17004273e+05])
polynomial_complexity.fit(n, t) # 2.753078336902666e-29
t_computed = polynomial_complexity.compute(n)
# array([ 1.30833282, 4.42749513, 6.25208812, 7.54665744, 8.55080343,
# 9.37125043, 10.06492849, 10.66581976, 11.19584342, 11.66996574])
```
Observe `t_computed` is very different from the values `t`
The issue can be plotted as follows:
```python
import matplotlib.pyplot as plt
plt.plot(n, t, "-", n, t_computed, "--")
plt.show()
```

In blue is `t`, the original function. In orange is `t_computed`.
Observe that, while `t` is consistent with a polynomial function, the `t_computed` line is negative and roughly linear/constant. The `t_computed` line is not consistent with a polynomial.
## Expected Behavior:
Result of `big_o.complexities.Polynomial()` and `big_o.complexities.Exponential()` should resemble a polynomial/exponential function that matches the fitted data.
|
0.0
|
82b8bb318f93a8b7664d989ec069973291929823
|
[
"big_o/test/test_complexities.py::TestComplexities::test_compute"
] |
[
"big_o/test/test_complexities.py::TestComplexities::test_not_fitted",
"big_o/test/test_complexities.py::TestComplexities::test_str_includes_units"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-28 03:11:06+00:00
|
bsd-3-clause
| 4,468 |
|
pbs__pycaption-239
|
diff --git a/README.rst b/README.rst
index ffbcc1c..3fbd233 100644
--- a/README.rst
+++ b/README.rst
@@ -14,6 +14,10 @@ For details, see the `documentation <http://pycaption.readthedocs.org>`__.
Changelog
---------
+1.0.7
+^^^^^
+- Fixed issue with SCC paint-on buffer not being cleared after storing
+
1.0.6
^^^^^
- Added MicroDVD format
diff --git a/pycaption/scc/__init__.py b/pycaption/scc/__init__.py
index 5e11e83..493a36b 100644
--- a/pycaption/scc/__init__.py
+++ b/pycaption/scc/__init__.py
@@ -285,6 +285,8 @@ class SCCReader(BaseReader):
if not self.buffer_dict['paint'].is_empty():
self.caption_stash.create_and_store(
self.buffer_dict['paint'], self.time)
+ self.buffer_dict['paint'] = \
+ self.node_creator_factory.new_creator()
def _translate_line(self, line):
# ignore blank lines
@@ -415,10 +417,9 @@ class SCCReader(BaseReader):
if not self.buffer.is_empty():
self._roll_up()
- # clear screen
+ # 942c - Erase Displayed Memory - Clear the current screen of any
+ # displayed captions or text.
elif word == '942c':
- self.roll_rows = []
-
# XXX - The 942c command has nothing to do with paint-ons
# This however is legacy code, and will break lots of tests if
# the proper buffer (self.buffer) is used.
@@ -427,13 +428,11 @@ class SCCReader(BaseReader):
if not self.buffer_dict['paint'].is_empty():
self.caption_stash.create_and_store(
self.buffer_dict['paint'], self.time)
- self.buffer = self.node_creator_factory.new_creator()
-
- # attempt to add proper end time to last caption(s)
+ self.buffer_dict['paint'] = \
+ self.node_creator_factory.new_creator()
self.caption_stash.correct_last_timing(
self.time_translator.get_time())
-
- # if command not one of the aforementioned, add to buffer
+ # If command is not one of the aforementioned, add it to buffer
else:
self.buffer.interpret_command(word)
diff --git a/setup.py b/setup.py
index dbd59ce..7eae904 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ dependencies = [
setup(
name='pycaption',
- version='1.0.6',
+ version='1.0.7',
description='Closed caption converter',
long_description=open(README_PATH).read(),
author='Joe Norton',
|
pbs/pycaption
|
3f377a32eb04a0ef79548d8f595092e249499bb4
|
diff --git a/tests/samples/scc.py b/tests/samples/scc.py
index b2f3ac2..27c4946 100644
--- a/tests/samples/scc.py
+++ b/tests/samples/scc.py
@@ -293,3 +293,24 @@ Scenarist_SCC V1.0
00:04:36;06 9420 9420 9152 9152 cdc1 4a4f 5220 46d5 cec4 49ce c720 d052 4fd6 49c4 45c4 20c2 d980 2026
"""
+
+SAMPLE_SCC_MULTIPLE_FORMATS = """\
+Scenarist_SCC V1.0
+
+00:00:00;00 942c 1c2c
+
+00:00:00;08 9429 9152 97a2 a843 ece9 e56e f4a7 7320 d6ef e9e3 e529 91f2 97a2 52e5 6de5 6d62 e5f2 20f4 6861 f420 64e5 67f2 e5e5 9252 97a2 79ef 7520 67ef f420 e96e 20f4 61f8 61f4 e9ef 6ebf 9420 9152 97a2 a8c4 616e 6e79 2980 91f2 97a2 4fe6 20e3 ef75 f273 e520 79ef 7520 64ef 6ea7 f480 9252 97a2 62e5 e361 7573 e520 79ef 7520 64e9 646e a7f4 a180
+
+00:00:02;15 9420 942c 942f 9420 91d0 9723 d9ef 75f2 20ea ef62 20e9 736e a7f4 2064 efe9 6e67 2068 61f2 6480 9170 9723 f7ef f26b aeae ae80
+
+00:00:04;15 9420 942c 942f 9420 91d0 97a2 aeae aee9 f4a7 7320 6d61 6be9 6e67 20f4 68e5 6d20 64ef 2068 61f2 6480 9170 97a2 f7ef f26b aeae ae80
+
+00:00:06;03 9420 942c 942f 9420 91d0 97a2 aeae ae61 6e64 2067 e5f4 f4e9 6e67 2070 61e9 6420 e6ef f220 e9f4 ae80
+
+00:00:08;04 9420 942c 942f 9420 91d0 97a1 a8d6 4f29 9170 97a1 d36e 6170 2061 6e64 2073 eff2 f420 79ef 75f2 20e5 f870 e56e 73e5 7320 f4ef 92d0 97a1 7361 76e5 20ef 76e5 f220 a434 2cb6 b0b0 2061 f420 f461 f820 f4e9 6de5 ae80
+
+00:00:09;18 9420 942c 942f 9420 9152 51d5 4943 cbc2 4f4f cbd3 ae20 c2c1 43cb 49ce c720 d94f d5ae
+
+00:00:13;04 9420 942c 942f
+
+"""
\ No newline at end of file
diff --git a/tests/test_scc.py b/tests/test_scc.py
index 0abbae7..23084fb 100644
--- a/tests/test_scc.py
+++ b/tests/test_scc.py
@@ -5,7 +5,7 @@ from pycaption.geometry import UnitEnum, HorizontalAlignmentEnum, VerticalAlignm
from pycaption.scc.specialized_collections import (InstructionNodeCreator,
TimingCorrectingCaptionList)
-from pycaption import SCCReader, CaptionReadNoCaptions
+from pycaption import SCCReader, CaptionReadNoCaptions, CaptionNode
from pycaption.scc.state_machines import DefaultProvidingPositionTracker
from tests.samples.scc import (
@@ -14,7 +14,7 @@ from tests.samples.scc import (
SAMPLE_SCC_WITH_ITALICS, SAMPLE_SCC_EMPTY, SAMPLE_SCC_ROLL_UP_RU2,
SAMPLE_SCC_PRODUCES_BAD_LAST_END_TIME, SAMPLE_NO_POSITIONING_AT_ALL_SCC,
SAMPLE_SCC_NO_EXPLICIT_END_TO_LAST_CAPTION, SAMPLE_SCC_EOC_FIRST_COMMAND,
- SAMPLE_SCC_WITH_EXTENDED_CHARACTERS
+ SAMPLE_SCC_WITH_EXTENDED_CHARACTERS, SAMPLE_SCC_MULTIPLE_FORMATS
)
TOLERANCE_MICROSECONDS = 500 * 1000
@@ -203,6 +203,35 @@ class CoverageOnlyTestCase(unittest.TestCase):
'>> Bike Iowa, your source for']
self.assertEqual(expected_texts, actual_texts)
+ def test_multiple_formats(self):
+ # Test for captions that contain both pop on and paint on formats to
+ # ensure the paint on lines are not repeated
+ expected_text_lines = [
+ "(Client's Voice)",
+ 'Remember that degree',
+ 'you got in taxation?',
+ '(Danny)',
+ "Of course you don't",
+ "because you didn't!",
+ "Your job isn't doing hard",
+ 'work...',
+ "...it's making them do hard",
+ 'work...',
+ '...and getting paid for it.',
+ '(VO)',
+ 'Snap and sort your expenses to',
+ 'save over $4,600 at tax time.',
+ 'QUICKBOOKS. BACKING YOU.']
+
+ captions = SCCReader().read(SAMPLE_SCC_MULTIPLE_FORMATS)\
+ .get_captions('en-US')
+ text_lines = [node.content
+ for caption in captions
+ for node in caption.nodes
+ if node.type_ == CaptionNode.TEXT]
+
+ self.assertEqual(expected_text_lines, text_lines)
+
def test_freeze_semicolon_spec_time(self):
scc1 = SCCReader().read(SAMPLE_SCC_ROLL_UP_RU2)
captions = scc1.get_captions('en-US')
|
SCC file content error
I tried to read a SCC file and print the content, the content looks repetitive
(Client's Voice)\nRemember that degree\nyou got in taxation?
(Client's Voice)\nRemember that degree\nyou got in taxation?
(Client's Voice)\nRemember that degree\nyou got in taxation?
(Client's Voice)\nRemember that degree\nyou got in taxation?
(Client's Voice)\nRemember that degree\nyou got in taxation?
(Client's Voice)\nRemember that degree\nyou got in taxation?
(Client's Voice)\nRemember that degree\nyou got in taxation?
[TV1_HC47974.txt](https://github.com/pbs/pycaption/files/2863380/TV1_HC47974.txt)
Could you please let me know the issue.
|
0.0
|
3f377a32eb04a0ef79548d8f595092e249499bb4
|
[
"tests/test_scc.py::CoverageOnlyTestCase::test_multiple_formats"
] |
[
"tests/test_scc.py::SCCReaderTestCase::test_caption_length",
"tests/test_scc.py::SCCReaderTestCase::test_correct_last_bad_timing",
"tests/test_scc.py::SCCReaderTestCase::test_default_positioning_when_no_positioning_is_specified",
"tests/test_scc.py::SCCReaderTestCase::test_detection",
"tests/test_scc.py::SCCReaderTestCase::test_empty_file",
"tests/test_scc.py::SCCReaderTestCase::test_italics_are_properly_read",
"tests/test_scc.py::SCCReaderTestCase::test_proper_timestamps",
"tests/test_scc.py::SCCReaderTestCase::test_removed_extended_characters_ascii_duplicate",
"tests/test_scc.py::SCCReaderTestCase::test_scc_positioning_is_read",
"tests/test_scc.py::SCCReaderTestCase::test_timing_is_properly_set_on_split_captions",
"tests/test_scc.py::CoverageOnlyTestCase::test_freeze_colon_spec_time",
"tests/test_scc.py::CoverageOnlyTestCase::test_freeze_rollup_captions_contents",
"tests/test_scc.py::CoverageOnlyTestCase::test_freeze_semicolon_spec_time",
"tests/test_scc.py::InterpretableNodeCreatorTestCase::test_italics_commands_are_formatted_properly",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_appending_then_appending",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_appending_then_extending",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_eoc_first_command",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_extending_then_appending",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_extending_then_extending",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_last_caption_zero_end_time_is_corrected",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_not_appending_none_or_empty_captions",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_not_extending_list_with_nones_or_empty_captions",
"tests/test_scc.py::TimingCorrectingCaptionListTestCase::test_not_overwriting_end_time"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-05 12:51:21+00:00
|
mit
| 4,469 |
|
pcdshub__hutch-python-26
|
diff --git a/hutch_python/load_conf.py b/hutch_python/load_conf.py
index 92d37389..43e7e7f4 100644
--- a/hutch_python/load_conf.py
+++ b/hutch_python/load_conf.py
@@ -19,7 +19,7 @@ def load(filename):
Returns
-------
- objs: dict{str: Object}
+ objs: dict{str: object}
All objects defined by the file that need to make it into the
environment. The strings are the names that will be accessible in the
global namespace.
@@ -31,16 +31,35 @@ def load(filename):
def read_conf(conf):
"""
- Separate this from load to make it easier to test without tons of temp
- files.
-
Parameters
----------
conf: dict
dict interpretation of the original yaml file
+
+ Returns
+ ------
+ objs: dict{str: object}
+ Return value of load
"""
hutch_python.clear_load()
- all_objects = hutch_python.objects
+ plugins = get_plugins(conf)
+ objects = run_plugins(plugins)
+ return objects
+
+
+def get_plugins(conf):
+ """
+ Parameters
+ ----------
+ conf: dict
+ dict interpretation of the original yaml file
+
+ Returns
+ -------
+ plugins: dict{int: list}
+ Mapping from priority level to list of instantiated plugins at that
+ prority.
+ """
plugins = defaultdict(list)
for plugin_name, info in conf.items():
@@ -54,6 +73,25 @@ def read_conf(conf):
this_plugin = module.Plugin(info)
plugins[this_plugin.priority].append(this_plugin)
+ return plugins
+
+
+def run_plugins(plugins):
+ """
+ Create all of the objects, given plugin instructions.
+
+ Parameters
+ ----------
+ plugins: dict{int: list}
+ Return value from get_plugins
+
+ Returns
+ ------
+ objs: dict{str: object}
+ Return value of load
+ """
+ all_objects = hutch_python.objects
+
plugin_priorities = reversed(sorted(list(plugins.keys())))
executed_plugins = []
diff --git a/hutch_python/log_setup.py b/hutch_python/log_setup.py
index 2ad989cf..51f928fa 100644
--- a/hutch_python/log_setup.py
+++ b/hutch_python/log_setup.py
@@ -7,6 +7,7 @@ import yaml
logger = logging.getLogger(__name__)
DIR_MODULE = Path(__file__).resolve().parent.parent
+DEFAULT_YAML = DIR_MODULE / 'logging.yml'
DIR_LOGS = DIR_MODULE / 'hutch_python/logs'
@@ -32,7 +33,7 @@ def setup_logging(path_yaml=None, dir_logs=None, default_level=logging.INFO):
"""
# Get the yaml path
if path_yaml is None:
- path_yaml = DIR_MODULE / 'logging.yml'
+ path_yaml = DEFAULT_YAML
# Make sure we are using Path objects
else:
path_yaml = Path(path_yaml)
diff --git a/hutch_python/plugins/namespace.py b/hutch_python/plugins/namespace.py
index 0b87ce1c..49f0ca43 100644
--- a/hutch_python/plugins/namespace.py
+++ b/hutch_python/plugins/namespace.py
@@ -65,7 +65,7 @@ class NamespaceManager:
self.name = name
def should_include(self, name, obj):
- return False
+ raise NotImplementedError('Need to subclass should_include')
def add(self, name, obj):
if self.should_include(name, obj):
diff --git a/hutch_python/utils.py b/hutch_python/utils.py
index 57e24e64..ba201a09 100644
--- a/hutch_python/utils.py
+++ b/hutch_python/utils.py
@@ -27,9 +27,10 @@ def extract_objs(module_name):
"""
objs = {}
# Allow filenames
- module_name = module_name.strip('.py')
- if '()' in module_name:
- module_name = module_name.strip('()')
+ if module_name.endswith('.py'):
+ module_name = module_name[:-3]
+ elif module_name.endswith('()'):
+ module_name = module_name[:-2]
call_me = True
else:
call_me = False
@@ -45,8 +46,9 @@ def extract_objs(module_name):
else:
objs[name] = my_obj
return objs
- except Exception:
- logger.exception('Error loading %s', module_name)
+ except Exception as exc:
+ logger.error('Error loading %s', module_name)
+ logger.debug(exc, exc_info=True)
return objs
all_kwd = getattr(module, '__all__', None)
if all_kwd is None:
diff --git a/logging.yml b/logging.yml
index dfa6cc1a..9b6906fc 100644
--- a/logging.yml
+++ b/logging.yml
@@ -3,75 +3,68 @@ disable_existing_loggers: false
# Define two formatters, one for the console and one for the the log files.
formatters:
- custom:
- (): 'coloredlogs.ColoredFormatter'
- fmt: '%(levelname)-8s %(message)s'
- file:
- format: '%(asctime)s - %(process)d - %(filename)s:%(lineno)4s - %(funcName)s(): %(levelname)-8s %(message)4s'
- datefmt: '%Y-%m-%d %H:%M:%S'
+ custom:
+ (): 'coloredlogs.ColoredFormatter'
+ fmt: '%(levelname)-8s %(message)s'
+ file:
+ format: '%(asctime)s - PID %(process)d %(filename)18s: %(lineno)-3s %(funcName)-18s %(levelname)-8s %(message)s'
+ datefmt: '%Y-%m-%d %H:%M:%S'
# There are handlers for the console stream and logging to a file for each of
# the log levels.
handlers:
- console:
- class: logging.StreamHandler
- level: INFO
- formatter: custom
- stream: ext://sys.stdout
+ console:
+ class: logging.StreamHandler
+ level: INFO
+ formatter: custom
+ stream: ext://sys.stdout
- debug:
- class: logging.handlers.RotatingFileHandler
- level: DEBUG
- formatter: file
- maxBytes: 20971520 # 20MB
- backupCount: 10
- mode: a
- delay: 0
+ debug:
+ class: logging.handlers.RotatingFileHandler
+ level: DEBUG
+ formatter: file
+ maxBytes: 20971520 # 20MB
+ backupCount: 10
+ mode: a
+ delay: 0
- info:
- class: logging.handlers.RotatingFileHandler
- level: INFO
- formatter: file
- maxBytes: 10485760 # 10MB
- backupCount: 10
- mode: a
- delay: 0
-
- warn:
- class: logging.handlers.RotatingFileHandler
- level: WARN
- formatter: file
- maxBytes: 10485760 # 10MB
- backupCount: 10
- mode: a
- delay: 0
+ info:
+ class: logging.handlers.RotatingFileHandler
+ level: INFO
+ formatter: file
+ maxBytes: 10485760 # 10MB
+ backupCount: 10
+ mode: a
+ delay: 0
+
+ warn:
+ class: logging.handlers.RotatingFileHandler
+ level: WARN
+ formatter: file
+ maxBytes: 10485760 # 10MB
+ backupCount: 10
+ mode: a
+ delay: 0
- error:
- class: logging.handlers.RotatingFileHandler
- level: ERROR
- formatter: file
- maxBytes: 10485760 # 10MB
- backupCount: 10
- mode: a
- delay: 0
+ error:
+ class: logging.handlers.RotatingFileHandler
+ level: ERROR
+ formatter: file
+ maxBytes: 10485760 # 10MB
+ backupCount: 10
+ mode: a
+ delay: 0
- critical:
- class: logging.handlers.RotatingFileHandler
- level: CRITICAL
- formatter: file
- maxBytes: 10485760 # 10MB
- backupCount: 10
- mode: a
- delay: 0
+ critical:
+ class: logging.handlers.RotatingFileHandler
+ level: CRITICAL
+ formatter: file
+ maxBytes: 10485760 # 10MB
+ backupCount: 10
+ mode: a
+ delay: 0
-# Root logger sees all the handlers, the hutch_python logger only has the console
root:
- level: DEBUG
- handlers: [console, info, error, critical, debug, warn]
- propogate: no
-
-loggers:
- <hutch_python>:
- level: INFO
- handlers: [console]
- propogate: no
+ level: DEBUG
+ handlers: [console, info, error, critical, debug, warn]
+ propogate: no
|
pcdshub/hutch-python
|
cca5e7e69ea92565f40f05ee9f2df243e9dbc246
|
diff --git a/hutch_python/tests/conftest.py b/hutch_python/tests/conftest.py
index cb5c5460..df37175a 100644
--- a/hutch_python/tests/conftest.py
+++ b/hutch_python/tests/conftest.py
@@ -1,6 +1,32 @@
import os
import sys
+import logging
+from copy import copy
+from contextlib import contextmanager
# We need to have the tests directory importable to match what we'd have in a
# real hutch-python install
sys.path.insert(0, os.path.dirname(__file__))
+
+
+@contextmanager
+def cli_args(args):
+ """
+ Context manager for running a block of code with a specific set of
+ command-line arguments.
+ """
+ prev_args = sys.argv
+ sys.argv = args
+ yield
+ sys.argv = prev_args
+
+
+@contextmanager
+def restore_logging():
+ """
+ Context manager for reverting our logging config after testing a function
+ that configures the logging.
+ """
+ prev_handlers = copy(logging.root.handlers)
+ yield
+ logging.root.handlers = prev_handlers
diff --git a/hutch_python/tests/sample_module_1.py b/hutch_python/tests/sample_module_1.py
index 5e3713e9..3a9a45da 100644
--- a/hutch_python/tests/sample_module_1.py
+++ b/hutch_python/tests/sample_module_1.py
@@ -1,3 +1,7 @@
hey = '4horses'
milk = 'cows'
some_int = 5
+
+
+def some_func():
+ return 4
diff --git a/hutch_python/tests/test_base_plugin.py b/hutch_python/tests/test_base_plugin.py
new file mode 100644
index 00000000..d0f594fa
--- /dev/null
+++ b/hutch_python/tests/test_base_plugin.py
@@ -0,0 +1,13 @@
+import logging
+
+from hutch_python.base_plugin import BasePlugin
+
+logger = logging.getLogger(__name__)
+
+
+def test_base_plugin():
+ logger.debug('test_base_plugin')
+ plugin = BasePlugin({})
+ plugin.get_objects()
+ plugin.future_object_hook('name', 'obj')
+ plugin.future_plugin_hook('source', {})
diff --git a/hutch_python/tests/test_cli.py b/hutch_python/tests/test_cli.py
new file mode 100644
index 00000000..25c8ee90
--- /dev/null
+++ b/hutch_python/tests/test_cli.py
@@ -0,0 +1,18 @@
+import os
+import logging
+
+from hutch_python.cli import setup_cli_env
+
+from conftest import cli_args, restore_logging
+
+logger = logging.getLogger(__name__)
+
+
+def test_setup_cli():
+ logger.debug('test_setup_cli')
+
+ cfg = os.path.dirname(__file__) + '/conf.yaml'
+
+ with cli_args(['hutch_python', '--cfg', cfg]):
+ with restore_logging():
+ setup_cli_env()
diff --git a/hutch_python/tests/test_load_conf.py b/hutch_python/tests/test_load_conf.py
index 39618f56..049ad85d 100644
--- a/hutch_python/tests/test_load_conf.py
+++ b/hutch_python/tests/test_load_conf.py
@@ -1,7 +1,8 @@
import logging
import os.path
-from hutch_python.load_conf import load, read_conf
+from hutch_python.base_plugin import BasePlugin
+from hutch_python.load_conf import load, read_conf, run_plugins
logger = logging.getLogger(__name__)
@@ -27,10 +28,38 @@ def test_read_empty():
def test_read_duplicate():
logger.debug('test_read_duplicate')
objs = read_conf({'load': ['sample_module_1.py', 'sample_module_1.py']})
- assert len(objs) == 3
+ assert len(objs) == 4
def test_read_only_namespaces():
logger.debug('test_read_only_namespaces')
objs = read_conf({'namespace': {'class': {'float': ['text', 'words']}}})
assert len(objs) == 2
+
+
+class BadGetObjects(BasePlugin):
+ name = 'broken'
+
+ def get_objects(self):
+ raise RuntimeError('I am broken for the test')
+
+
+class SimplePlugin(BasePlugin):
+ name = 'simple'
+
+ def get_objects(self):
+ return {'name': 'text'}
+
+
+class BadFutureHook(SimplePlugin):
+ name = 'broken'
+
+ def future_plugin_hook(self, *args, **kwargs):
+ raise RuntimeError('I am broken for the test')
+
+
+def test_skip_failures():
+ logger.debug('test_skip_failures')
+ bad_plugins = {0: [BadGetObjects({}), BadFutureHook({}), SimplePlugin({})]}
+ objs = run_plugins(bad_plugins)
+ assert objs['name'] == 'text'
diff --git a/hutch_python/tests/test_log_setup.py b/hutch_python/tests/test_log_setup.py
new file mode 100644
index 00000000..2e4e289a
--- /dev/null
+++ b/hutch_python/tests/test_log_setup.py
@@ -0,0 +1,16 @@
+import logging
+
+from hutch_python.log_setup import DEFAULT_YAML, DIR_LOGS, setup_logging
+
+from conftest import restore_logging
+
+logger = logging.getLogger(__name__)
+
+
+def test_setup_logging():
+ logger.debug('test_setup_logging')
+
+ with restore_logging():
+ setup_logging(path_yaml=DEFAULT_YAML)
+
+ assert DIR_LOGS.exists()
diff --git a/hutch_python/tests/test_plugins/test_experiment.py b/hutch_python/tests/test_plugins/test_experiment.py
index e02c808b..27e391a8 100644
--- a/hutch_python/tests/test_plugins/test_experiment.py
+++ b/hutch_python/tests/test_plugins/test_experiment.py
@@ -1,5 +1,7 @@
import logging
+import pytest
+
from hutch_python.plugins.experiment import Plugin
logger = logging.getLogger(__name__)
@@ -40,3 +42,13 @@ def test_experiment_plugin():
objs = plugin.get_objects()
assert 'x' in objs
assert 'y' in objs
+
+
+def test_experiment_auto():
+ logger.debug('test_experiment_auto')
+
+ info = {'name': 'automatic',
+ 'import': 'experiment'}
+ plugin = Plugin(info)
+ with pytest.raises(NotImplementedError):
+ plugin.get_objects()
diff --git a/hutch_python/tests/test_plugins/test_namespace.py b/hutch_python/tests/test_plugins/test_namespace.py
index e8e86b50..e6eab894 100644
--- a/hutch_python/tests/test_plugins/test_namespace.py
+++ b/hutch_python/tests/test_plugins/test_namespace.py
@@ -1,15 +1,28 @@
import logging
+from types import SimpleNamespace
-from hutch_python.plugins.namespace import Plugin
+import pytest
+
+from hutch_python.plugins.namespace import Plugin, NamespaceManager
logger = logging.getLogger(__name__)
+def test_namespace_plugin_manager():
+ logger.debug('test_namespace_plugin_base')
+
+ space = SimpleNamespace()
+ manager = NamespaceManager(space, 'test')
+ with pytest.raises(NotImplementedError):
+ manager.should_include('name', 'obj')
+
+
def test_namespace_plugin_class():
logger.debug('test_namespace_plugin_class')
objs = {'one': 1, 'two': 2.0, 'three': '3'}
info = {'class': {'float': ['flt'],
+ 'skip_bad': ['skip_me'],
'str': ['text', 'words']}}
plugin = Plugin(info)
namespaces = plugin.get_objects()
diff --git a/hutch_python/tests/test_utils.py b/hutch_python/tests/test_utils.py
index 79af2764..ee66bd41 100644
--- a/hutch_python/tests/test_utils.py
+++ b/hutch_python/tests/test_utils.py
@@ -9,15 +9,19 @@ def test_extract_objs():
logger.debug('test_extract_objs')
# Has no __all__ keyword
objs = utils.extract_objs('sample_module_1')
- assert objs == dict(hey='4horses',
- milk='cows',
- some_int=5)
+ assert objs['hey'] == '4horses'
+ assert objs['milk'] == 'cows'
+ assert objs['some_int'] == 5
# Has an __all__ keyword
objs = utils.extract_objs('sample_module_2.py')
assert objs == dict(just_this=5.0)
# Doesn't exist
objs = utils.extract_objs('fake_module_243esd')
assert objs == {}
+ objs = utils.extract_objs('sample_module_1.hey')
+ assert objs['hey'] == '4horses'
+ objs = utils.extract_objs('sample_module_1.some_func()')
+ assert objs['some_func'] == 4
def test_find_class():
diff --git a/run_tests.py b/run_tests.py
index d3a3d031..f2000c6c 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -1,11 +1,12 @@
#!/usr/bin/env python
import os
import sys
-import pytest
-from pathlib import Path
import logging
+from pathlib import Path
from logging.handlers import RotatingFileHandler
+import pytest
+
if __name__ == '__main__':
# Show output results from every test function
# Show the message output for skipped and expected failures
@@ -36,7 +37,7 @@ if __name__ == '__main__':
if do_rollover:
handler.doRollover()
formatter = logging.Formatter(fmt=('%(asctime)s.%(msecs)03d '
- '%(module)-10s '
+ '%(module)-15s '
'%(levelname)-8s '
'%(threadName)-10s '
'%(message)s'),
|
Add tests for files with 0 coverage
- `cli.py`
- `log_setup.py`
|
0.0
|
cca5e7e69ea92565f40f05ee9f2df243e9dbc246
|
[
"hutch_python/tests/test_base_plugin.py::test_base_plugin",
"hutch_python/tests/test_load_conf.py::test_read_empty",
"hutch_python/tests/test_load_conf.py::test_read_only_namespaces",
"hutch_python/tests/test_load_conf.py::test_skip_failures",
"hutch_python/tests/test_log_setup.py::test_setup_logging",
"hutch_python/tests/test_plugins/test_experiment.py::test_experiment_plugin",
"hutch_python/tests/test_plugins/test_experiment.py::test_experiment_auto",
"hutch_python/tests/test_plugins/test_namespace.py::test_namespace_plugin_manager",
"hutch_python/tests/test_plugins/test_namespace.py::test_namespace_plugin_class",
"hutch_python/tests/test_utils.py::test_extract_objs",
"hutch_python/tests/test_utils.py::test_find_class"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-12-22 23:19:52+00:00
|
bsd-3-clause
| 4,470 |
|
pcdshub__pcdsutils-16
|
diff --git a/pcdsutils/requirements.py b/pcdsutils/requirements.py
index cc97673..80cc3e1 100644
--- a/pcdsutils/requirements.py
+++ b/pcdsutils/requirements.py
@@ -185,10 +185,12 @@ def compare_requirements(conda_deps, pip_deps):
def _compare_requirements(args=None):
'(Console entry-point)'
parser = argparse.ArgumentParser()
- parser.description = 'Build requirements.txt files from conda meta.yaml'
+ parser.description = 'Compare requirements.txt files with conda meta.yaml'
parser.add_argument('REPO_ROOT', type=str, help='Repository root path')
parser.add_argument('--verbose', '-v', action='store_true',
help='Increase verbosity')
+ parser.add_argument('--ignore-docs', action='store_true',
+ help='Ignore documentation differences')
args = parser.parse_args(args=args)
logging.basicConfig(level='DEBUG' if args.verbose else 'INFO',
format='%(message)s')
@@ -196,6 +198,7 @@ def _compare_requirements(args=None):
repo_root = pathlib.Path(args.REPO_ROOT)
conda_deps = requirements_from_conda(repo_root=repo_root)
pip_deps = get_pip_requirements(repo_root=repo_root)
+ has_diff = False
for fn, conda_keys in PIP_REQUIREMENT_FILES.items():
logger.info('--- %s: %s ---', fn, '/'.join(conda_keys))
cdeps = _combine_conda_deps(conda_deps, conda_keys)
@@ -203,6 +206,8 @@ def _compare_requirements(args=None):
logger.debug('Comparing dependencies. cdeps=%s pdeps=%s', cdeps, pdeps)
for name, difference in compare_requirements(cdeps, pdeps).items():
if difference:
+ if not ('docs' in fn and args.ignore_docs):
+ has_diff = True
display_name = name.replace('_', ' ').capitalize()
logger.info('%s:', display_name)
for item in sorted(difference):
@@ -213,3 +218,4 @@ def _compare_requirements(args=None):
else:
logger.info('- %s', item)
logger.info('')
+ return 1 if has_diff else 0
|
pcdshub/pcdsutils
|
b713949e0e5a3ebce0805dd3e70f089089596e23
|
diff --git a/pcdsutils/tests/test_requirements.py b/pcdsutils/tests/test_requirements.py
index 371ce69..9d72118 100644
--- a/pcdsutils/tests/test_requirements.py
+++ b/pcdsutils/tests/test_requirements.py
@@ -17,6 +17,12 @@ def test_compare_requirements(repo_root):
)
+def test_compare_requirements_ignore_docs(repo_root):
+ pcdsutils.requirements._compare_requirements(
+ args=['--ignore-docs', repo_root]
+ )
+
+
def test_requirements_from_conda(repo_root):
pcdsutils.requirements._requirements_from_conda(
args=['-v', '--dry-run', repo_root]
|
requirements-compare add option to skip docs and exit code
<!--- Provide a general summary of the issue in the Title above -->
## Feature Request
- Add option to skip the check of docs requirements since a `docs` entry at meta.yaml will be a violation at conda-forge. If we are okay with keeping this extra at the PCDS side I am fine with it since the extra field doesn't seem to cause an issue with `conda-build`.
- Make requirements-compare exist with 1 instead of 0 so we can use it directly at CI.
## Current Behavior
requirements-compare exits with 0 even when files have inconsistency.
|
0.0
|
b713949e0e5a3ebce0805dd3e70f089089596e23
|
[
"pcdsutils/tests/test_requirements.py::test_compare_requirements_ignore_docs[/root/data/temp_dir/tmpg47whpb0/pcdshub__pcdsutils__0.0]"
] |
[
"pcdsutils/tests/test_requirements.py::test_compare_requirements[/root/data/temp_dir/tmpg47whpb0/pcdshub__pcdsutils__0.0]",
"pcdsutils/tests/test_requirements.py::test_requirements_from_conda[/root/data/temp_dir/tmpg47whpb0/pcdshub__pcdsutils__0.0]"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-06-06 01:11:20+00:00
|
bsd-3-clause
| 4,471 |
|
pcdshub__pcdsutils-44
|
diff --git a/pcdsutils/__init__.py b/pcdsutils/__init__.py
index 14ae388..f4f9e12 100644
--- a/pcdsutils/__init__.py
+++ b/pcdsutils/__init__.py
@@ -1,8 +1,8 @@
+from . import log, utils
from ._version import get_versions
-from . import utils
-from . import log
+from .enum import HelpfulIntEnum
__version__ = get_versions()['version']
del get_versions
-__all__ = ['utils', 'log']
+__all__ = ['utils', 'log', "HelpfulIntEnum"]
diff --git a/pcdsutils/enum.py b/pcdsutils/enum.py
new file mode 100644
index 0000000..2660ab5
--- /dev/null
+++ b/pcdsutils/enum.py
@@ -0,0 +1,103 @@
+from __future__ import annotations
+
+import enum
+from typing import Iterator, Set, Union
+
+EnumId = Union[enum.Enum, int, str]
+
+
+class CaseInsensitiveEnumMeta(enum.EnumMeta):
+ def __getattr__(self, key: EnumId) -> enum.Enum:
+ if hasattr(key, "lower"):
+ for item in self:
+ if item.name.lower() == key.lower():
+ return item
+ return super().__getattr__(key)
+
+ def __getitem__(self, key: EnumId) -> enum.Enum:
+ if hasattr(key, "lower"):
+ for item in self:
+ if item.name.lower() == key.lower():
+ return item
+ return super().__getitem__(key)
+
+
+class HelpfulIntEnum(enum.IntEnum, metaclass=CaseInsensitiveEnumMeta):
+ """
+ IntEnum subclass with some utility extensions and case insensitivity.
+ """
+
+ @classmethod
+ def from_any(cls, identifier: EnumId) -> HelpfulIntEnum:
+ """
+ Try all the ways to interpret identifier as the enum.
+
+ This is intended to consolidate the try/except tree typically used
+ to interpret external input as an enum.
+
+ Parameters
+ ----------
+ identifier : EnumId
+ Any str, int, or Enum value that corresponds with a valid value
+ on this HelpfulIntEnum instance.
+
+ Returns
+ -------
+ enum : HelpfulIntEnum
+ The corresponding enum object associated with the identifier.
+
+ Raises
+ ------
+ ValueError
+ If the value is not a valid enum identifier.
+ """
+ try:
+ return cls[identifier]
+ except KeyError:
+ return cls(identifier)
+
+ @classmethod
+ def include(
+ cls,
+ identifiers: Iterator[EnumId],
+ ) -> Set[HelpfulIntEnum]:
+ """
+ Returns all enum values matching the identifiers given.
+ This is a shortcut for calling cls.from_any many times and
+ assembling a set of the results.
+
+ Parameters
+ ----------
+ identifiers : Iterator[EnumId]
+ Any iterable that contains strings, ints, and Enum values that
+ correspond with valid values on this HelpfulIntEnum instance.
+
+ Returns
+ -------
+ enums : Set[HelpfulIntEnum]
+ A set whose elements are the enum objects associated with the
+ input identifiers.
+ """
+ return {cls.from_any(ident) for ident in identifiers}
+
+ @classmethod
+ def exclude(
+ cls,
+ identifiers: Iterator[EnumId],
+ ) -> Set[HelpfulIntEnum]:
+ """
+ Return all enum values other than the ones given.
+
+ Parameters
+ ----------
+ identifiers : Iterator[EnumId]
+ Any iterable that contains strings, ints, and Enum values that
+ correspond with valid values on this HelpfulIntEnum instance.
+
+ Returns
+ -------
+ enums : Set[HelpfulIntEnum]
+ A set whose elements are the valid enum objects not associated
+ with the input identifiers.
+ """
+ return set(cls.__members__.values()) - cls.include(identifiers)
|
pcdshub/pcdsutils
|
d23c986683084e824650ad6506df74d836ee55bd
|
diff --git a/pcdsutils/tests/test_enum.py b/pcdsutils/tests/test_enum.py
new file mode 100644
index 0000000..2335813
--- /dev/null
+++ b/pcdsutils/tests/test_enum.py
@@ -0,0 +1,79 @@
+import pytest
+
+from ..enum import HelpfulIntEnum
+
+
+def test_subclass():
+ class MyEnum(HelpfulIntEnum):
+ A = 4
+ B = 5
+ C = 6
+
+ assert MyEnum(4) == MyEnum.A
+ assert MyEnum(5) == MyEnum.B
+ assert MyEnum(6) == MyEnum.C
+
+ with pytest.raises(ValueError):
+ MyEnum(7)
+
+ assert MyEnum["A"] == MyEnum.A == 4
+ assert MyEnum["a"] == MyEnum.A == 4
+ assert MyEnum["a"] == MyEnum.a == 4
+ assert MyEnum.from_any("a") == MyEnum.A == 4
+ assert MyEnum.from_any(4) == MyEnum.A == 4
+
+ assert MyEnum["C"] == MyEnum.C == 6
+ assert MyEnum["c"] == MyEnum.C == 6
+ assert MyEnum["c"] == MyEnum.c == 6
+ assert MyEnum.from_any("c") == MyEnum.C == 6
+ assert MyEnum.from_any(6) == MyEnum.C == 6
+
+
+def test_functional():
+ MyEnum = HelpfulIntEnum("MyEnum", {"a": 1, "B": 2}, module=__name__)
+
+ assert MyEnum["A"] == MyEnum.A == 1
+ assert MyEnum["a"] == MyEnum.A == 1
+ assert MyEnum["a"] == MyEnum.a == 1
+ assert MyEnum.from_any("a") == MyEnum.A
+ assert MyEnum.from_any(1) == MyEnum.A
+
+ assert MyEnum["B"] == MyEnum.B == 2
+ assert MyEnum["b"] == MyEnum.B == 2
+ assert MyEnum["b"] == MyEnum.b == 2
+ assert MyEnum.from_any("b") == MyEnum.B == 2
+ assert MyEnum.from_any(2) == MyEnum.B == 2
+
+
+def test_functional_list():
+ MyEnum = HelpfulIntEnum("MyEnum", ["a", "b"], start=1, module=__name__)
+
+ assert MyEnum["A"] == MyEnum.A == 1
+ assert MyEnum["a"] == MyEnum.A == 1
+ assert MyEnum["a"] == MyEnum.a == 1
+ assert MyEnum.from_any("a") == MyEnum.A
+ assert MyEnum.from_any(1) == MyEnum.A
+
+ assert MyEnum["B"] == MyEnum.B == 2
+ assert MyEnum["b"] == MyEnum.B == 2
+ assert MyEnum["b"] == MyEnum.b == 2
+ assert MyEnum.from_any("b") == MyEnum.B == 2
+ assert MyEnum.from_any(2) == MyEnum.B == 2
+
+
+def test_include():
+ class MyEnum(HelpfulIntEnum):
+ A = 4
+ B = 5
+ C = 6
+
+ assert MyEnum.include([4, "c"]) == {MyEnum.A, MyEnum.C}
+
+
+def test_exclude():
+ class MyEnum(HelpfulIntEnum):
+ A = 4
+ B = 5
+ C = 6
+
+ assert MyEnum.exclude([4, "c"]) == {MyEnum.B}
|
Add HelpfulIntEnum from pcdsdaq
https://github.com/pcdshub/pcdsdaq/pull/108/files#r785021109
https://github.com/pcdshub/pcdsdaq/pull/108/files#r785022116
pcdsdaq/pcdsdaq/daq/interface.py
|
0.0
|
d23c986683084e824650ad6506df74d836ee55bd
|
[
"pcdsutils/tests/test_enum.py::test_subclass",
"pcdsutils/tests/test_enum.py::test_functional",
"pcdsutils/tests/test_enum.py::test_functional_list",
"pcdsutils/tests/test_enum.py::test_include",
"pcdsutils/tests/test_enum.py::test_exclude"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-02-02 22:01:48+00:00
|
bsd-3-clause
| 4,472 |
|
pcdshub__pcdsutils-59
|
diff --git a/pcdsutils/profile.py b/pcdsutils/profile.py
index 7916aaf..d2e80e2 100644
--- a/pcdsutils/profile.py
+++ b/pcdsutils/profile.py
@@ -8,9 +8,10 @@ import logging
import pkgutil
import warnings
from contextlib import contextmanager
-from inspect import isclass, isfunction
+from inspect import getmembers, isclass, isfunction, ismethod
from types import ModuleType
-from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
+from typing import (Any, Callable, Dict, Iterable, Iterator, List, Optional,
+ Tuple)
logger = logging.getLogger(__name__)
@@ -54,7 +55,7 @@ def profiler_context(
use_global_profiler: bool = False,
output_now: bool = True,
min_threshold: float = 0,
-) -> LineProfiler:
+) -> Iterator[LineProfiler]:
"""
Context manager for profiling a fixed span of an application.
@@ -397,7 +398,7 @@ def get_native_methods(
native_methods = set()
if seen is None:
seen = set()
- for obj in module_or_cls.__dict__.values():
+ for _, obj in getmembers(module_or_cls):
try:
if obj in seen:
continue
@@ -413,7 +414,7 @@ def get_native_methods(
if isclass(obj):
inner_methods = get_native_methods(obj, module, seen=seen)
native_methods.update(inner_methods)
- elif isfunction(obj):
+ elif isfunction(obj) or ismethod(obj):
native_methods.add(obj)
return native_methods
|
pcdshub/pcdsutils
|
69cf8f7ad027e13ef56c5ebe0098aec4b0e93597
|
diff --git a/pcdsutils/tests/dummy_submodule.py b/pcdsutils/tests/dummy_submodule.py
new file mode 100644
index 0000000..e5783ec
--- /dev/null
+++ b/pcdsutils/tests/dummy_submodule.py
@@ -0,0 +1,24 @@
+"""
+Submodule to import from for test_profile
+"""
+from inspect import getmembers
+
+
+class SomeClass:
+ def __init__(self):
+ self.some_attr = 1
+
+ def method(self):
+ return getmembers(self)
+
+ @classmethod
+ def cls_method(cls):
+ return 2
+
+ @staticmethod
+ def stat_method():
+ return 3
+
+
+def some_function():
+ return 4
diff --git a/pcdsutils/tests/test_profile.py b/pcdsutils/tests/test_profile.py
new file mode 100644
index 0000000..f4c7b81
--- /dev/null
+++ b/pcdsutils/tests/test_profile.py
@@ -0,0 +1,66 @@
+"""
+Check some of the profiler internal utilities.
+"""
+import logging
+import os.path
+
+import pytest
+
+from ..profile import (get_native_functions, get_submodules, is_native,
+ profiler_context)
+from . import dummy_submodule
+
+logger = logging.getLogger(__name__)
+
+
+def test_is_native():
+ # Was defined there
+ assert is_native(dummy_submodule.some_function, dummy_submodule)
+ # Is available here, but was not defined here
+ assert not is_native(dummy_submodule.getmembers, dummy_submodule)
+ # Argument 2 must be a module
+ with pytest.raises(TypeError):
+ is_native(dummy_submodule.some_function, "profile")
+ # Primitives have no source module information
+ with pytest.raises(TypeError):
+ is_native('text', dummy_submodule)
+
+
+def test_get_native_functions():
+ dummy_natives = get_native_functions(dummy_submodule)
+ # We know about these from the way the test is set up
+ assert dummy_submodule.some_function in dummy_natives
+ assert dummy_submodule.SomeClass.method in dummy_natives
+ assert dummy_submodule.SomeClass.cls_method in dummy_natives
+ assert dummy_submodule.SomeClass.stat_method in dummy_natives
+ # This shouldn't be there at all
+ assert test_get_native_functions not in dummy_natives
+ # This is imported in profile but not native
+ assert dummy_submodule.getmembers not in dummy_natives
+
+
+def test_get_submodules():
+ submodules = get_submodules('pcdsutils.tests')
+ assert dummy_submodule in submodules
+
+
+def test_basic_profiler():
+ # Run through and make sure our functions are included
+ with profiler_context(['pcdsutils']) as profiler:
+ dummy_submodule.some_function()
+ some_obj = dummy_submodule.SomeClass()
+ some_obj.method()
+ some_obj.cls_method()
+ some_obj.stat_method()
+
+ timings = profiler.get_stats().timings
+ functions_profiled = [
+ (os.path.basename(file), func)
+ for (file, lineno, func), stats in timings.items() if stats
+ ]
+ logger.debug(functions_profiled)
+ assert ('dummy_submodule.py', '__init__') in functions_profiled
+ assert ('dummy_submodule.py', 'some_function') in functions_profiled
+ assert ('dummy_submodule.py', 'method') in functions_profiled
+ assert ('dummy_submodule.py', 'cls_method') in functions_profiled
+ assert ('dummy_submodule.py', 'stat_method') in functions_profiled
|
Profiler should include class methods as well
`isfunction` appears to be insufficient:
https://github.com/pcdshub/pcdsutils/blob/69cf8f7ad027e13ef56c5ebe0098aec4b0e93597/pcdsutils/profile.py#L416
`inspect.ismethod` should be added as well, per the prototype of this functionality in typhos https://github.com/pcdshub/typhos/pull/505
|
0.0
|
69cf8f7ad027e13ef56c5ebe0098aec4b0e93597
|
[
"pcdsutils/tests/test_profile.py::test_get_native_functions"
] |
[
"pcdsutils/tests/test_profile.py::test_is_native",
"pcdsutils/tests/test_profile.py::test_get_submodules"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-16 22:32:51+00:00
|
bsd-3-clause
| 4,473 |
|
pcdshub__pytmc-262
|
diff --git a/docs/pragma_usage.rst b/docs/pragma_usage.rst
index e8e4e18..3c56768 100644
--- a/docs/pragma_usage.rst
+++ b/docs/pragma_usage.rst
@@ -356,6 +356,35 @@ Use the ``notify`` keyword in the ``update`` setting to enable this:
records will be processed at a rate of 1 Hz/the IOC-configured poll rate.
+Scale and Offset
+................
+
+Integer and floating point values may have an EPICS-side scaling applied.
+
+Example:
+
+.. code-block:: none
+
+ scale: 3.0
+ offset: 1.0
+
+Values will be scaled according to the following:
+
+.. code-block:: none
+
+ readback_value = raw_value * scale + offset
+ setpoint_value = (user_value - offset) / scale
+
+.. note::
+
+ If either ``scale`` or ``offset`` are applied to an integer symbol, the
+ generated EPICS record type will no longer be a "long" integer input/output
+ record but rather change to an analog input/output record.
+
+ Keep this in mind if using advanced "field" directives in your pragmas.
+
+If unspecified, ``scale`` will be assumed to be 1.0 and ``offset`` 0.0.
+
Archiver settings
.................
diff --git a/pytmc/pragmas.py b/pytmc/pragmas.py
index 361fec1..821959c 100644
--- a/pytmc/pragmas.py
+++ b/pytmc/pragmas.py
@@ -624,6 +624,14 @@ def parse_array_settings(pragma, dimensions):
)
+# Helpers which normalize various pragma values.
+_normalizers = {
+ 'io': (normalize_io, 'io'),
+ 'update': (parse_update_rate, '1s poll'),
+ 'archive': (parse_archive_settings, '1s scan'),
+}
+
+
def normalize_config(config):
'''
Parse and normalize pragma values into Python representations
@@ -640,12 +648,8 @@ def normalize_config(config):
dict
A shallow-copy of ``config`` with parsed and normalized values
'''
- key_to_parser = {'io': (normalize_io, 'io'),
- 'update': (parse_update_rate, '1s poll'),
- 'archive': (parse_archive_settings, '1s scan'),
- }
ret = dict(config)
- for key, (parser_func, default) in key_to_parser.items():
+ for key, (parser_func, default) in _normalizers.items():
ret[key] = parser_func(ret.get(key, default))
return ret
diff --git a/pytmc/record.py b/pytmc/record.py
index 6cc36d9..84500d7 100644
--- a/pytmc/record.py
+++ b/pytmc/record.py
@@ -306,6 +306,12 @@ class RecordPackage:
f'Unsupported data type {data_type.name} in chain: '
f'{chain.tcname} record: {chain.pvname}'
) from None
+ if spec is IntegerRecordPackage:
+ if "scale" in chain.config or "offset" in chain.config:
+ # longin/longout do not support scaling. Special-case and
+ # cast to a FloatRecordPackage.
+ spec = FloatRecordPackage
+
return spec(*args, chain=chain, **kwargs)
@@ -572,6 +578,37 @@ class FloatRecordPackage(TwincatTypeRecordPackage):
pass1={}),
}
+ def get_scale_offset(self):
+ """Get the scale and offset for the analog record(s)."""
+ scale = self.config.get("scale", None)
+ offset = self.config.get("offset", None)
+ if scale is None and offset is None:
+ return None, None
+
+ scale = "1.0" if scale is None else scale
+ offset = offset or "0.0"
+ return scale, offset
+
+ def generate_input_record(self):
+ record = super().generate_input_record()
+ scale, offset = self.get_scale_offset()
+ if scale is not None:
+ # If LINR==SLOPE, VAL = RVAL * ESLO + EOFF
+ record.fields["LINR"] = "SLOPE"
+ record.fields["ESLO"] = scale
+ record.fields["EOFF"] = offset
+ return record
+
+ def generate_output_record(self):
+ record = super().generate_output_record()
+ scale, offset = self.get_scale_offset()
+ if scale is not None:
+ # If LINR==SLOPE, then RVAL = (VAL - EOFF) / ESLO
+ record.fields["LINR"] = "SLOPE"
+ record.fields["ESLO"] = scale
+ record.fields["EOFF"] = offset
+ return record
+
class EnumRecordPackage(TwincatTypeRecordPackage):
"""Create a set of record for a ENUM Twincat Variable"""
|
pcdshub/pytmc
|
1e2af6ab3c5a5c4322b65b0115b41e6304a2bcfb
|
diff --git a/pytmc/tests/test_xml_collector.py b/pytmc/tests/test_xml_collector.py
index b53963d..d05f52a 100644
--- a/pytmc/tests/test_xml_collector.py
+++ b/pytmc/tests/test_xml_collector.py
@@ -687,3 +687,51 @@ def test_sub_io_change(dimensions, pragma, expected_records):
for pkg in pragmas.record_packages_from_symbol(outer)
for record in pkg.records)
assert set(record_names) == expected_records
+
+
[email protected](
+ "data_type, pragma, expected_scale, expected_offset",
+ [
+ pytest.param(
+ "FLOAT",
+ "pv: PREFIX; scale: 2.0; offset: 1.0",
+ "2.0",
+ "1.0",
+ id="float-scale-and-offset",
+ ),
+ pytest.param(
+ "UDINT",
+ "pv: PREFIX; scale: 3.0; offset: 0.1",
+ "3.0",
+ "0.1",
+ id="int-scale-and-offset",
+ ),
+ pytest.param(
+ "UDINT",
+ "pv: PREFIX; scale: 3.0",
+ "3.0",
+ "0.0",
+ id="int-no-offset",
+ ),
+ pytest.param(
+ "UDINT",
+ "pv: PREFIX; offset: 3.0",
+ "1.0",
+ "3.0",
+ id="int-no-scale",
+ ),
+ ],
+)
+def test_scale_and_offset(data_type, pragma, expected_scale, expected_offset):
+ item = make_mock_twincatitem(
+ name='Main.obj',
+ data_type=make_mock_type('UDINT', is_complex_type=False),
+ pragma=pragma,
+ )
+
+ pkg, = list(pragmas.record_packages_from_symbol(item))
+ for rec in pkg.records:
+ assert rec.fields['LINR'] == "SLOPE"
+ assert rec.fields['ESLO'] == expected_scale
+ assert rec.fields['EOFF'] == expected_offset
+ assert rec.record_type in {"ai", "ao"}
|
Pragma enhancement: "scale" key
Request
---------
Ability to scale integer (or floating point) values from the PLC on the EPICS side in a consistent way.
"scale" key
-----------
e.g.,
```
scale: 1.234
offset: 10.0
```
Presence of a `scale` key on a record normally generated as `longin` would then create an `ai` record instead.
Alternative: `calc` key
--------------------
e.g., `calc: 2 * value + 1.54`
Maybe consider a `calc` field which may allow generic calculations by way of an auto-generated `calc` record.
This could then allow for easy slope, offset, and _perhaps_ even references to other symbols. (That last part is iffy, though)
cc @slacAWallace
|
0.0
|
1e2af6ab3c5a5c4322b65b0115b41e6304a2bcfb
|
[
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[float-scale-and-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-scale-and-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-no-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-no-scale]"
] |
[
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[BOOL-False-BinaryRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[BOOL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[INT-False-IntegerRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[INT-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[DINT-False-IntegerRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[DINT-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[ENUM-False-EnumRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[ENUM-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[REAL-False-FloatRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[REAL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[LREAL-False-FloatRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[LREAL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[STRING-False-StringRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-i-True-asynInt8ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-io-True-asynInt8ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-i-True-asynInt16ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-io-True-asynInt16ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-i-True-asynInt32ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-io-True-asynInt32ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-i-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-io-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-i-True-asynFloat32ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-io-True-asynFloat32ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-i-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-io-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-i-True-asynFloat64ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-io-True-asynFloat64ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-i-True-asynInt16ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-io-True-asynInt16ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[STRING-i-False-asynInt8ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[STRING-io-False-asynInt8ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BYTE-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BYTE-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[SINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[SINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[USINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[USINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[WORD-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[WORD-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[INT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[INT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DWORD-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DWORD-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UDINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UDINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[LREAL-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[LREAL-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[STRING-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[STRING-6--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-1hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-2hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.5hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.02hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.1hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-50s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-10s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-2s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-1s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.5s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.1s",
"pytmc/tests/test_xml_collector.py::test_bool_naming[BOOL-0-FALSE-TRUE-True]",
"pytmc/tests/test_xml_collector.py::test_bool_naming[STRING-0-None-None-False]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_PREC[LREAL-0-3-True]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_PREC[STRING-0-None-False]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-o-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-i-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-o-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-io-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-i-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-o-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-io-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-i-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-o-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-io-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-i-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-o-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-io-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-i-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-o-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-io-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-i-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-o-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-io-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-i-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-o-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-io-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[INT-0-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[INT-0-False-True-3]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[LREAL-0-False-True-9]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[STRING-0-True-False-81]",
"pytmc/tests/test_xml_collector.py::test_scalar",
"pytmc/tests/test_xml_collector.py::test_complex_array",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1,3]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[..1]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[1..]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1,99..]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[separate_prefixes]",
"pytmc/tests/test_xml_collector.py::test_enum_array",
"pytmc/tests/test_xml_collector.py::test_unroll_formatting",
"pytmc/tests/test_xml_collector.py::test_pv_linking",
"pytmc/tests/test_xml_collector.py::test_pv_linking_special",
"pytmc/tests/test_xml_collector.py::test_waveform_archive[over_threshold]",
"pytmc/tests/test_xml_collector.py::test_waveform_archive[under_threshold]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[0..1]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[1..3,5,7,9]",
"pytmc/tests/test_xml_collector.py::test_sub_io_change[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_sub_io_change[change_to_io]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-27 00:19:25+00:00
|
bsd-3-clause
| 4,474 |
|
pcdshub__pytmc-268
|
diff --git a/.travis.yml b/.travis.yml
index 0def5d3..e7a6f17 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,7 +18,7 @@ env:
jobs:
allow_failures:
- - name: "Python 3.6 - PIP"
+ - name: "Python 3.8 - PIP"
after_failure:
- cat logs/run_tests_log.txt
diff --git a/pytmc/record.py b/pytmc/record.py
index 84500d7..5451552 100644
--- a/pytmc/record.py
+++ b/pytmc/record.py
@@ -357,6 +357,9 @@ class TwincatTypeRecordPackage(RecordPackage):
output_only_fields = {'DOL', 'IVOA', 'IVOV', 'OMSL'}
archive_fields = ['VAL']
+ # Is an auxiliary record required to support existing record linking?
+ link_requires_record = False
+
def __init_subclass__(cls, **kwargs):
"""Magic to have field_defaults be the combination of hierarchy"""
super().__init_subclass__(**kwargs)
@@ -465,6 +468,35 @@ class TwincatTypeRecordPackage(RecordPackage):
return record
+ def _get_omsl_fields(self):
+ """Get output mode select fields for the output record."""
+ if not self.linked_to_pv or self.linked_to_pv[-1] is None:
+ return {}
+
+ last_link = self.linked_to_pv[-1]
+ if last_link.startswith('*'):
+ # NOTE: A special, undocumented syntax for a lack of a better
+ # idea/more time: need to allow pytmc to get access to a PV name
+ # it generates
+ # Consider this temporary API, only to be used in
+ # lcls-twincat-general for now.
+ pv_parts = list(self.config['pv'])
+ linked_to_pv = self.delimiter.join(
+ pv_parts[:-1] + [last_link.lstrip('*')]
+ )
+ else:
+ linked_to_pv = ''.join(
+ part for part in self.linked_to_pv
+ if part is not None
+ )
+
+ linked_to_pv = linked_to_pv.replace(self.macro_character, '$')
+ return {
+ 'OMSL': 'closed_loop',
+ 'DOL': linked_to_pv + ' CPP MS',
+ 'SCAN': self.config.get('link_scan', '.5 second'),
+ }
+
def generate_output_record(self):
"""
Generate the record to write values back to the PLC
@@ -495,26 +527,11 @@ class TwincatTypeRecordPackage(RecordPackage):
record.fields.pop('TSE', None)
record.fields.pop('PINI', None)
- if self.linked_to_pv and self.linked_to_pv[-1] is not None:
- record.fields['OMSL'] = 'closed_loop'
-
- last_link = self.linked_to_pv[-1]
- if last_link.startswith('*'):
- # NOTE: A special, undocumented syntax for a lack of a better
- # idea/more time: need to allow pytmc to get access to a PV
- # name it generates
- # Consider this temporary API, only to be used in
- # lcls-twincat-general for now.
- pv_parts = list(self.config['pv'])
- linked_to_pv = ':'.join(pv_parts[:-1] +
- [last_link.lstrip('*')])
- else:
- linked_to_pv = ''.join([part for part in self.linked_to_pv
- if part is not None])
-
- linked_to_pv = linked_to_pv.replace(self.macro_character, '$')
- record.fields['DOL'] = linked_to_pv + ' CPP MS'
- record.fields['SCAN'] = self.config.get('link_scan', '.5 second')
+ # Add on OMSL fields, if this is linked to an existing record.
+ # Some links (such as strings) may require auxiliary records, so
+ # don't repurpose the output record in that case.
+ if not self.link_requires_record:
+ record.fields.update(self._get_omsl_fields())
# Update with given pragma fields - ignoring input-only fields:
user_fields = self.config.get('field', {})
@@ -735,7 +752,15 @@ class StringRecordPackage(TwincatTypeRecordPackage):
input_rtyp = 'waveform'
output_rtyp = 'waveform'
dtyp = 'asynInt8'
- field_defaults = {'FTVL': 'CHAR'}
+ field_defaults = {
+ 'FTVL': 'CHAR',
+ 'APST': 'On Change',
+ 'MPST': 'On Change',
+ }
+
+ # Links to string PVs require auxiliary 'lso' record.
+ link_requires_record = True
+ link_suffix = "LSO"
@property
def nelm(self):
@@ -756,6 +781,38 @@ class StringRecordPackage(TwincatTypeRecordPackage):
record.fields['NELM'] = self.nelm
return record
+ def generate_link_record(self):
+ """An auxiliary 'lso' link record to pass string PVs to the PLC."""
+ record = EPICSRecord(
+ self.delimiter.join((self.pvname, self.link_suffix)),
+ record_type="lso",
+ direction="output",
+ package=self,
+ )
+ record.fields.pop('TSE', None)
+ record.fields.pop('PINI', None)
+ record.fields["SIZV"] = self.nelm
+
+ # Add our port
+ record.fields.update(
+ self._get_omsl_fields()
+ )
+ record.fields['OUT'] = f"{self.pvname} PP MS"
+ _update_description(record, f"Aux link record for {self.chain.tcname}")
+ return record
+
+ @property
+ def records(self):
+ """All records that will be created in the package"""
+ records = [self.generate_input_record()]
+ link_fields = self._get_omsl_fields()
+ if self.io_direction == 'output' or link_fields:
+ records.append(self.generate_output_record())
+ if link_fields and self.link_requires_record:
+ records.append(self.generate_link_record())
+
+ return records
+
DATA_TYPES = {
'BOOL': BinaryRecordPackage,
|
pcdshub/pytmc
|
fb40d723ee2a50b988030e32541a74d317edd396
|
diff --git a/pytmc/tests/test_xml_collector.py b/pytmc/tests/test_xml_collector.py
index d05f52a..ce532b8 100644
--- a/pytmc/tests/test_xml_collector.py
+++ b/pytmc/tests/test_xml_collector.py
@@ -521,6 +521,29 @@ def test_pv_linking():
assert rec.fields['SCAN'] == '.5 second'
+def test_pv_linking_string():
+ item = make_mock_twincatitem(
+ name='Main.tcname', data_type=make_mock_type('STRING', length=70),
+ pragma='pv: PVNAME; link: OTHER:RECORD.VAL$')
+
+ pkg, = list(pragmas.record_packages_from_symbol(item))
+ assert pkg.pvname == 'PVNAME'
+ assert pkg.tcname == 'Main.tcname'
+ assert isinstance(pkg, StringRecordPackage)
+
+ in_rec, out_rec, lso_rec = pkg.records
+ assert "OMSL" not in out_rec.fields
+ assert "DOL" not in out_rec.fields
+
+ lso_pvname = pkg.delimiter.join((out_rec.pvname, pkg.link_suffix))
+ assert lso_rec.pvname == lso_pvname
+ assert lso_rec.record_type == "lso"
+ assert lso_rec.fields["OMSL"] == "closed_loop"
+ assert lso_rec.fields["DOL"] == "OTHER:RECORD.VAL$ CPP MS"
+ assert lso_rec.fields["SCAN"] == ".5 second"
+ assert lso_rec.fields["SIZV"] == 70
+
+
def test_pv_linking_special():
struct = make_mock_twincatitem(
name='Main.array_base',
|
Support EPICS PV "link" for string values
One additional record will be required, unfortunately, to link up the waveform PV which writes to the PLC with the source PV:
`source -> lso -> waveform -> PLC`
Where `lso` the newish long string out record, that is.
This looks like:
```
record(waveform, "TO_PLC") {
field(SCAN, ".5 second")
field(DTYP, "asynInt8ArrayOut")
field(INP, "@asyn($(PORT),0,1)ADSPORT=851/MAIN.fbLinkedValue2.sPLCInternalValue=")
field(FTVL, "CHAR")
field(NELM, "80")
}
record(lso, "LSO_HELPER") {
field(DOL, "(SOURCE_PV) CPP MS")
field(OMSL, "closed_loop")
field(OUT, "TO_PLC PP MS")
field(TPRO, 1)
}
```
80, just as a reminder: `If no size is specified, TwinCAT assumes 80 characters by default.`
This could be up to 255, but given EPICS's string limitations I don't really think supporting beyond 80 is really necessary to start with.
|
0.0
|
fb40d723ee2a50b988030e32541a74d317edd396
|
[
"pytmc/tests/test_xml_collector.py::test_pv_linking_string"
] |
[
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[BOOL-False-BinaryRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[BOOL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[INT-False-IntegerRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[INT-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[DINT-False-IntegerRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[DINT-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[ENUM-False-EnumRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[ENUM-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[REAL-False-FloatRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[REAL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[LREAL-False-FloatRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[LREAL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[STRING-False-StringRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-i-True-asynInt8ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-io-True-asynInt8ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-i-True-asynInt16ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-io-True-asynInt16ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-i-True-asynInt32ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-io-True-asynInt32ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-i-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-io-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-i-True-asynFloat32ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-io-True-asynFloat32ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-i-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-io-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-i-True-asynFloat64ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-io-True-asynFloat64ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-i-True-asynInt16ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-io-True-asynInt16ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[STRING-i-False-asynInt8ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[STRING-io-False-asynInt8ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BYTE-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BYTE-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[SINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[SINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[USINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[USINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[WORD-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[WORD-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[INT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[INT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DWORD-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DWORD-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UDINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UDINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[LREAL-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[LREAL-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[STRING-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[STRING-6--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-1hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-2hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.5hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.02hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.1hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-50s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-10s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-2s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-1s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.5s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.1s",
"pytmc/tests/test_xml_collector.py::test_bool_naming[BOOL-0-FALSE-TRUE-True]",
"pytmc/tests/test_xml_collector.py::test_bool_naming[STRING-0-None-None-False]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_PREC[LREAL-0-3-True]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_PREC[STRING-0-None-False]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-o-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-i-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-o-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-io-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-i-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-o-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-io-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-i-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-o-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-io-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-i-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-o-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-io-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-i-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-o-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-io-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-i-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-o-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-io-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-i-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-o-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-io-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[INT-0-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[INT-0-False-True-3]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[LREAL-0-False-True-9]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[STRING-0-True-False-81]",
"pytmc/tests/test_xml_collector.py::test_scalar",
"pytmc/tests/test_xml_collector.py::test_complex_array",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1,3]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[..1]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[1..]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1,99..]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[separate_prefixes]",
"pytmc/tests/test_xml_collector.py::test_enum_array",
"pytmc/tests/test_xml_collector.py::test_unroll_formatting",
"pytmc/tests/test_xml_collector.py::test_pv_linking",
"pytmc/tests/test_xml_collector.py::test_pv_linking_special",
"pytmc/tests/test_xml_collector.py::test_waveform_archive[over_threshold]",
"pytmc/tests/test_xml_collector.py::test_waveform_archive[under_threshold]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[0..1]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[1..3,5,7,9]",
"pytmc/tests/test_xml_collector.py::test_sub_io_change[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_sub_io_change[change_to_io]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[float-scale-and-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-scale-and-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-no-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-no-scale]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-12 22:14:59+00:00
|
bsd-3-clause
| 4,475 |
|
pcdshub__pytmc-283
|
diff --git a/pytmc/record.py b/pytmc/record.py
index 5451552..0f04c67 100644
--- a/pytmc/record.py
+++ b/pytmc/record.py
@@ -214,7 +214,7 @@ class RecordPackage:
return config
def _configure_link(self):
- 'Link this record to a pre-existing EPICS record via a CA (CPP) link'
+ 'Link this record to a pre-existing EPICS record via a CA (CP) link'
self.linked_to_pv = self.config.get('link') or None
def _configure_pvname(self):
@@ -493,7 +493,7 @@ class TwincatTypeRecordPackage(RecordPackage):
linked_to_pv = linked_to_pv.replace(self.macro_character, '$')
return {
'OMSL': 'closed_loop',
- 'DOL': linked_to_pv + ' CPP MS',
+ 'DOL': linked_to_pv + ' CP MS',
'SCAN': self.config.get('link_scan', '.5 second'),
}
|
pcdshub/pytmc
|
bd2529195298e5b1dbfd96772a7702b2199f4f4b
|
diff --git a/pytmc/tests/test_xml_collector.py b/pytmc/tests/test_xml_collector.py
index c709c10..1051146 100644
--- a/pytmc/tests/test_xml_collector.py
+++ b/pytmc/tests/test_xml_collector.py
@@ -517,7 +517,7 @@ def test_pv_linking():
assert isinstance(pkg, IntegerRecordPackage)
rec = pkg.generate_output_record()
assert rec.fields['OMSL'] == 'closed_loop'
- assert rec.fields['DOL'] == 'OTHER:RECORD CPP MS'
+ assert rec.fields['DOL'] == 'OTHER:RECORD CP MS'
assert rec.fields['SCAN'] == '.5 second'
@@ -539,7 +539,7 @@ def test_pv_linking_string():
assert lso_rec.pvname == lso_pvname
assert lso_rec.record_type == "lso"
assert lso_rec.fields["OMSL"] == "closed_loop"
- assert lso_rec.fields["DOL"] == "OTHER:RECORD.VAL$ CPP MS"
+ assert lso_rec.fields["DOL"] == "OTHER:RECORD.VAL$ CP MS"
assert lso_rec.fields["SCAN"] == ".5 second"
assert lso_rec.fields["SIZV"] == 70
@@ -571,10 +571,10 @@ def test_pv_linking_struct():
pkg1, pkg2 = list(pragmas.record_packages_from_symbol(struct))
rec = pkg1.generate_output_record()
- assert rec.fields['DOL'] == 'PREFIX:ABCD.STAT CPP MS'
+ assert rec.fields['DOL'] == 'PREFIX:ABCD.STAT CP MS'
rec = pkg2.generate_output_record()
- assert rec.fields['DOL'] == 'LINK:OTHER_PV CPP MS'
+ assert rec.fields['DOL'] == 'LINK:OTHER_PV CP MS'
def test_pv_linking_special():
@@ -594,7 +594,7 @@ def test_pv_linking_special():
pkg, = list(pragmas.record_packages_from_symbol(struct))
rec = pkg.generate_output_record()
- assert rec.fields['DOL'] == 'PREFIX:ABCD.STAT CPP MS'
+ assert rec.fields['DOL'] == 'PREFIX:ABCD.STAT CP MS'
@pytest.mark.parametrize(
|
pytmc link settings should be "CP" instead of "CPP"
Code:
https://github.com/pcdshub/pytmc/blob/bd2529195298e5b1dbfd96772a7702b2199f4f4b/pytmc/record.py#L496
Otherwise, we will be stuck at the 2Hz updates in the record:
> If the input link specifies CA, CP, or CPP, regardless of the location of the process variable being referenced, it will be forced to be a Channel Access link. This is helpful for separating process chains that are not tightly related. If the input link specifies CP, it also causes the record containing the input link to process whenever a monitor is posted, no matter what the record’s SCAN field specifies. If the input link specifies CPP, it causes the record to be processed if and only if the record with the CPP link has a SCAN field set to Passive. In other words, CP and CPP cause the record containing the link to be processed with the process variable that they reference changes.
|
0.0
|
bd2529195298e5b1dbfd96772a7702b2199f4f4b
|
[
"pytmc/tests/test_xml_collector.py::test_pv_linking",
"pytmc/tests/test_xml_collector.py::test_pv_linking_string",
"pytmc/tests/test_xml_collector.py::test_pv_linking_struct",
"pytmc/tests/test_xml_collector.py::test_pv_linking_special"
] |
[
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[BOOL-False-BinaryRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[BOOL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[INT-False-IntegerRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[INT-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[DINT-False-IntegerRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[DINT-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[ENUM-False-EnumRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[ENUM-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[REAL-False-FloatRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[REAL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[LREAL-False-FloatRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[LREAL-True-WaveformRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_record_package_from_chain[STRING-False-StringRecordPackage]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-i-True-asynInt8ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[BOOL-io-True-asynInt8ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-i-True-asynInt16ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[INT-io-True-asynInt16ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-i-True-asynInt32ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[DINT-io-True-asynInt32ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-i-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-io-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-i-True-asynFloat32ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[REAL-io-True-asynFloat32ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-i-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-io-False-asynFloat64]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-i-True-asynFloat64ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[LREAL-io-True-asynFloat64ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-i-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-io-False-asynInt32]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-i-True-asynInt16ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[ENUM-io-True-asynInt16ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_dtype[STRING-i-False-asynInt8ArrayIn]",
"pytmc/tests/test_xml_collector.py::test_dtype[STRING-io-False-asynInt8ArrayOut]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BYTE-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BYTE-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[SINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[SINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[USINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[USINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[WORD-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[WORD-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[INT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[INT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DWORD-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DWORD-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[DINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UDINT-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[UDINT-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[LREAL-0--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[LREAL-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[STRING-2--INP-@asyn($(PORT),0,1)ADSPORT=851/POLL_RATE=1/a.b.c?]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[STRING-6--OUT-@asyn($(PORT),0,1)ADSPORT=851/a.b.c=]",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-1hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-2hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.5hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.02hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.1hz",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-50s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-10s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-2s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-1s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.5s",
"pytmc/tests/test_xml_collector.py::test_input_output_scan[BOOL-2-0.1s",
"pytmc/tests/test_xml_collector.py::test_bool_naming[BOOL-0-FALSE-TRUE-True]",
"pytmc/tests/test_xml_collector.py::test_bool_naming[STRING-0-None-None-False]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_PREC[LREAL-0-3-True]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_PREC[STRING-0-None-False]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-o-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-i-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-o-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[BOOL-io-False-True-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-i-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-o-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[INT-io-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-i-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-o-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[DINT-io-False-True-LONG]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-i-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-o-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[ENUM-io-False-True-SHORT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-i-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-o-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[REAL-io-False-True-FLOAT]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-i-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-i-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-o-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[LREAL-io-False-True-DOUBLE]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-i-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-o-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_FTVL[STRING-io-True-False-CHAR]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[INT-0-False-False-None]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[INT-0-False-True-3]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[LREAL-0-False-True-9]",
"pytmc/tests/test_xml_collector.py::test_BaseRecordPackage_guess_NELM[STRING-0-True-False-81]",
"pytmc/tests/test_xml_collector.py::test_scalar",
"pytmc/tests/test_xml_collector.py::test_complex_array",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1,3]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[..1]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[1..]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[0..1,99..]",
"pytmc/tests/test_xml_collector.py::test_complex_array_partial[separate_prefixes]",
"pytmc/tests/test_xml_collector.py::test_enum_array",
"pytmc/tests/test_xml_collector.py::test_unroll_formatting",
"pytmc/tests/test_xml_collector.py::test_waveform_archive[over_threshold]",
"pytmc/tests/test_xml_collector.py::test_waveform_archive[under_threshold]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[0..1]",
"pytmc/tests/test_xml_collector.py::test_complex_sub_array_partial[1..3,5,7,9]",
"pytmc/tests/test_xml_collector.py::test_sub_io_change[no-pragma]",
"pytmc/tests/test_xml_collector.py::test_sub_io_change[change_to_io]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[float-scale-and-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-scale-and-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-no-offset]",
"pytmc/tests/test_xml_collector.py::test_scale_and_offset[int-no-scale]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-05-20 20:52:17+00:00
|
bsd-3-clause
| 4,476 |
|
pckroon__hypothesis-networkx-13
|
diff --git a/hypothesis_networkx/strategy.py b/hypothesis_networkx/strategy.py
index 5d29848..5d58020 100644
--- a/hypothesis_networkx/strategy.py
+++ b/hypothesis_networkx/strategy.py
@@ -111,36 +111,26 @@ def graph_builder(draw,
# Shrink towards high index, so shrink to the path graph. Otherwise
# it'll shrink to the star graph.
initial_edges = [draw(st.tuples(st.integers(-(n_idx-1), 0).map(lambda x: -x),
- st.just(n_idx),
- edge_data))
+ st.just(n_idx)))
for n_idx in range(1, len(graph))]
graph.add_edges_from(initial_edges)
- def edge_filter(idx, jdx):
- """
- Helper function to decide whether the edge between idx and jdx can still
- be added to graph.
- """
- # <= because self loops
- return ((not graph.has_edge(idx, jdx) or is_multigraph) and
- (idx <= jdx or is_directed) and
- (idx != jdx or self_loops))
-
- available_edges = [(idx, jdx) for jdx in graph for idx in graph
- if edge_filter(idx, jdx)]
-
# Now for the mess. The maximum number of edges possible depends on the
# graph type.
- if max_edges is not None:
- # Correct for number of edges already made if graph is connected.
- # This may mean we added more edges than originally allowed.
- max_edges -= len(graph.edges)
if not is_multigraph:
# Multi(Di)Graphs can make an infinite number of edges. For everything
# else we clamp the range to (0, max_possible_edges)
- max_possible_edges = len(available_edges)
+ max_possible_edges = len(graph) * (len(graph) - 1)
+ if is_directed:
+ max_possible_edges *= 2
+ if self_loops:
+ max_possible_edges += len(graph)
if max_edges is None or max_edges > max_possible_edges:
max_edges = max_possible_edges
+ if max_edges is not None:
+ # Correct for number of edges already made if graph is connected.
+ # This may mean we added more edges than originally allowed.
+ max_edges -= len(graph.edges)
if max_edges < 0:
max_edges = 0
@@ -152,35 +142,44 @@ def graph_builder(draw,
elif min_edges > max_edges:
min_edges = max_edges
+ def edge_filter(edge):
+ """
+ Helper function to decide whether the edge between idx and jdx can still
+ be added to graph.
+ """
+ # <= because self loops
+ idx, jdx = edge
+ return ((not graph.has_edge(idx, jdx) or is_multigraph) and
+ (idx <= jdx or is_directed) and
+ (idx != jdx or self_loops))
+
# We need to sample a number of items from options, these items are
# possibly not unique. In addition, we need to draw the same number of
# items from edge_data and associate the two. To top it off, uniqueness
# is defined by the content of the first element of the tuple.
- if is_multigraph:
- # This is the recommended way because it shrinks better, but it is
- # prohibitively slow if too many of the available edge have to be drawn,
- # and they need to be unique.
- # See https://github.com/HypothesisWorks/hypothesis/issues/1887
- edges = st.lists(st.tuples(st.sampled_from(available_edges), edge_data),
- # unique_by=None if is_multigraph else lambda e: e[:-1],
- min_size=min_edges,
- max_size=max_edges)
- graph.add_edges_from((e[0][0], e[0][1], e[1]) for e in draw(edges))
- else:
- # Not the recommended way, but way faster if edges have to be unique
- # Based on https://github.com/HypothesisWorks/hypothesis/issues/1393#issuecomment-409505039
- edges = []
- for _ in range(draw(st.integers(min_edges, max_edges))):
- idx, jdx = draw(st.sampled_from(available_edges))
- available_edges.remove((idx, jdx))
- edges.append((idx, jdx, draw(edge_data)))
- graph.add_edges_from(edges)
+ edges = st.lists(
+ st.tuples(
+ st.integers(min_value=0, max_value=len(graph) - 1),
+ st.integers(min_value=0, max_value=len(graph) - 1),
+ ).filter(edge_filter),
+ unique=not is_multigraph,
+ min_size=min_edges,
+ max_size=max_edges
+ )
+ graph.add_edges_from(draw(edges))
+
+ edge_datas = draw(st.lists(
+ edge_data,
+ min_size=len(graph.edges),
+ max_size=len(graph.edges))
+ )
+ for edge, data in zip(graph.edges, edge_datas):
+ graph.edges[edge].update(data)
if node_keys is not None:
- new_idxs = draw(st.lists(node_keys,
- unique=True,
- min_size=len(graph),
- max_size=len(graph)))
- graph = nx.relabel_nodes(graph, dict(zip(list(graph), new_idxs)))
+ new_idxs = draw(st.sets(node_keys,
+ min_size=len(graph),
+ max_size=len(graph)))
+ graph = nx.relabel_nodes(graph, dict(zip(list(graph), list(new_idxs))))
return graph
|
pckroon/hypothesis-networkx
|
618afe2993460cb1f641254274008350c7d15724
|
diff --git a/tests/test_graph_builder.py b/tests/test_graph_builder.py
index 7ddf052..5ca3504 100644
--- a/tests/test_graph_builder.py
+++ b/tests/test_graph_builder.py
@@ -157,7 +157,3 @@ def test_negative_max_edges(data):
assert len(graph.edges) == 0
assert len(graph.nodes) <= 5
-
-
-if __name__ == '__main__':
- test_graph_builder()
|
Deprecation warning about using empty data for sampled_from
```
____________________________________________________________________________________________ test_graph_builder ____________________________________________________________________________________________
@settings(max_examples=250, suppress_health_check=[HealthCheck.too_slow])
> @given(st.data())
def test_graph_builder(data):
"""
Make sure the number of nodes and edges of the generated graphs is correct,
and make sure that graphs that are supposed to be connected are.
../hypothesis-networkx/tests/test_graph_builder.py:30:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/core.py:606: in evaluate_test_data
result = self.execute(data)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/core.py:571: in execute
result = self.test_runner(data, run)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/executors.py:56: in default_new_style_executor
return function(data)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/core.py:569: in run
return test(*args, **kwargs)
../hypothesis-networkx/tests/test_graph_builder.py:30: in test_graph_builder
@given(st.data())
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/core.py:530: in test
result = self.test(*args, **kwargs)
../hypothesis-networkx/tests/test_graph_builder.py:69: in test_graph_builder
graph = data.draw(strategy)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/_strategies.py:2105: in draw
result = self.conjecture_data.draw(strategy)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/internal/conjecture/data.py:830: in draw
return self.__draw(strategy, label=label)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/internal/conjecture/data.py:845: in __draw
return strategy.do_draw(self)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:156: in do_draw
return data.draw(self.wrapped_strategy)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/internal/conjecture/data.py:830: in draw
return self.__draw(strategy, label=label)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/internal/conjecture/data.py:839: in __draw
return strategy.do_draw(self)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/_strategies.py:1872: in do_draw
return self.definition(data.draw, *self.args, **self.kwargs)
../hypothesis-networkx/hypothesis_networkx/strategy.py:168: in graph_builder
graph.add_edges_from((e[0][0], e[0][1], e[1]) for e in draw(edges))
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/internal/conjecture/data.py:824: in draw
if strategy.is_empty:
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/strategies.py:154: in accept
recur(self)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/strategies.py:150: in recur
mapping[strat] = getattr(strat, calculation)(recur)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:92: in calc_is_empty
return recur(self.wrapped_strategy)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:107: in wrapped_strategy
unwrapped_args = tuple(unwrap_strategies(s) for s in self.__args)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:107: in <genexpr>
unwrapped_args = tuple(unwrap_strategies(s) for s in self.__args)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:51: in unwrap_strategies
result = unwrap_strategies(s.wrapped_strategy)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:107: in wrapped_strategy
unwrapped_args = tuple(unwrap_strategies(s) for s in self.__args)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:107: in <genexpr>
unwrapped_args = tuple(unwrap_strategies(s) for s in self.__args)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:51: in unwrap_strategies
result = unwrap_strategies(s.wrapped_strategy)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/searchstrategy/lazy.py:112: in wrapped_strategy
base = self.function(*self.__args, **self.__kwargs)
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/_strategies.py:700: in sampled_from
since="2019-03-12",
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
message = 'sampled_from() with nothing to sample is deprecated and will be an error in a future version. It currently returns `st.nothing()`, which if unexpected can make parts of a strategy silently vanish.'
since = '2019-03-12', verbosity = Verbosity.normal
def note_deprecation(message, since, verbosity=None):
# type: (str, str, Verbosity) -> None
if verbosity is None:
verbosity = settings.default.verbosity
assert verbosity is not None
if since != "RELEASEDAY":
date = datetime.datetime.strptime(since, "%Y-%m-%d").date()
assert datetime.date(2016, 1, 1) <= date
warning = HypothesisDeprecationWarning(message)
if verbosity > Verbosity.quiet:
> warnings.warn(warning, stacklevel=2)
E hypothesis.errors.HypothesisDeprecationWarning: sampled_from() with nothing to sample is deprecated and will be an error in a future version. It currently returns `st.nothing()`, which if unexpected can make parts of a strategy silently vanish.
../../../.cache/pypoetry/virtualenvs/bootsteps-py3.7/lib/python3.7/site-packages/hypothesis/_settings.py:801: HypothesisDeprecationWarning
```
|
0.0
|
618afe2993460cb1f641254274008350c7d15724
|
[
"tests/test_graph_builder.py::test_graph_builder"
] |
[
"tests/test_graph_builder.py::test_node_edge_data",
"tests/test_graph_builder.py::test_node_keys",
"tests/test_graph_builder.py::test_negative_max_edges"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-09-10 13:01:48+00:00
|
apache-2.0
| 4,477 |
|
pckroon__pysmiles-13
|
diff --git a/README.md b/README.md
index c8b693c..23e7b17 100644
--- a/README.md
+++ b/README.md
@@ -53,6 +53,25 @@ element attribute.
of 1.5. If `False`, will create a molecule using *only* the information in
the SMILES string.
+### Stereochemical information
+Currently the library cannot handle stereochemical information, neither E/Z nor
+R/S. Any stereochemical information that was in the SMILES string will be
+*discarded* upon parsing. This means there will be no difference between
+parsing *e.g.* `N[C@](Br)(O)C`, `N[C@@](Br)(O)C` and `NC(Br)(O)C`. Parsing
+these *will result in the same molecule*. The same holds for *e.g.* `F/C=C/F`
+and `FC=CF`. These will result in the same molecule.
+
+Whenever stereochemical information is being discarded a warning will be
+logged using the built-in `logging` module. If you want to disable all the
+messages logged by `pysmiles` you can add the following snippet to your code,
+without interfering with any logging by your own code:
+
+```python
+import logging
+logging.getLogger('pysmiles').setLevel(logging.CRITICAL) # Anything higher than warning
+```
+
+
## Writing SMILES
The function `write_smiles(molecule, default_element='*', start=None)` can be
used to write SMILES strings from a molecule. The function does *not* check
@@ -196,7 +215,6 @@ print(write_smiles(mol))
- `correct_aromatic_rings` is fragile.
- There is currently no way of specifying stereo chemical information. The
parser can deal with it, but it will be discarded.
-- It is not on PyPI
- It only processes SMILES. This might later be extended to e.g. InChi, SLN,
SMARTS, etc.
diff --git a/pysmiles/read_smiles.py b/pysmiles/read_smiles.py
index 1d55882..f4bf6e0 100644
--- a/pysmiles/read_smiles.py
+++ b/pysmiles/read_smiles.py
@@ -18,6 +18,7 @@ Exposes functionality needed for parsing SMILES strings.
"""
import enum
+import logging
import networkx as nx
@@ -25,6 +26,7 @@ from .smiles_helper import (add_explicit_hydrogens, remove_explicit_hydrogens,
parse_atom, fill_valence, mark_aromatic_edges,
mark_aromatic_atoms)
+LOGGER = logging.getLogger(__name__)
@enum.unique
class TokenType(enum.Enum):
@@ -175,7 +177,7 @@ def read_smiles(smiles, explicit_hydrogen=False, zero_order_bonds=True,
ring_nums[token] = (idx - 1, next_bond)
next_bond = None
elif tokentype == TokenType.EZSTEREO:
- print("I can't deal with stereo yet...")
+ LOGGER.warning('E/Z stereochemical information, which is specified by "%s", will be discarded', token)
if ring_nums:
raise KeyError('Unmatched ring indices {}'.format(list(ring_nums.keys())))
diff --git a/pysmiles/smiles_helper.py b/pysmiles/smiles_helper.py
index cd78eb0..fb59432 100644
--- a/pysmiles/smiles_helper.py
+++ b/pysmiles/smiles_helper.py
@@ -18,11 +18,14 @@ Contains helper functions for parsing and writing SMILES strings, as well as
some convenience functions for adding hydrogens, and detecting aromaticity.
"""
+import logging
import re
import operator
import networkx as nx
+LOGGER = logging.getLogger(__name__)
+
ISOTOPE_PATTERN = r'(?P<isotope>[\d]+)?'
ELEMENT_PATTERN = r'(?P<element>b|c|n|o|s|p|\*|[A-Z][a-z]{0,2})'
STEREO_PATTERN = r'(?P<stereo>@|@@|@TH[1-2]|@AL[1-2]|@SP[1-3]|@OH[\d]{1,2}|'\
@@ -98,7 +101,7 @@ def parse_atom(atom):
raise ValueError("A hydrogen atom can't have hydrogens")
if 'stereo' in out:
- print("I don't quite know how to handle stereo yet...")
+ LOGGER.warning('Atom "%s" contains stereochemical information that will be discarded.', atom)
return out
|
pckroon/pysmiles
|
46dd2273dd79967b5c38bc92bc72b58e9745a66a
|
diff --git a/tests/test_read_smiles.py b/tests/test_read_smiles.py
index 51390d0..d85e718 100644
--- a/tests/test_read_smiles.py
+++ b/tests/test_read_smiles.py
@@ -497,12 +497,16 @@ def test_invalid_smiles(smiles, error_type):
read_smiles(smiles)
-def test_cis_trans():
- smiles = r'F/C=C/F', r'C(\F)=C/F', r'F\C=C/F', r'C(/F)=C/F'
- for smile in smiles:
- read_smiles(smile, explicit_hydrogen=False)
-
-
-def test_extended_stereo():
- smiles = 'NC(Br)=[C@]=C(O)C'
- read_smiles(smiles)
[email protected]('smiles, n_records',[
+ (r'F/C=C/F', 2),
+ (r'C(\F)=C/F', 2),
+ (r'F\C=C/F', 2),
+ (r'C(/F)=C/F', 2),
+ ('NC(Br)=[C@]=C(O)C', 1),
+ ('c1ccccc1', 0)
+])
+def test_stereo_logging(caplog, smiles, n_records):
+ read_smiles(smiles, explicit_hydrogen=False)
+ assert len(caplog.records) == n_records
+ for record in caplog.records:
+ assert record.levelname == "WARNING"
|
could you not PRINT warnings?
maybe it is better to write to stderr or at least offer an option to shut off the warnings like "I can't deal with stereo yet..."
|
0.0
|
46dd2273dd79967b5c38bc92bc72b58e9745a66a
|
[
"tests/test_read_smiles.py::test_stereo_logging[F/C=C/F-2]",
"tests/test_read_smiles.py::test_stereo_logging[C(\\\\F)=C/F-2]",
"tests/test_read_smiles.py::test_stereo_logging[F\\\\C=C/F-2]",
"tests/test_read_smiles.py::test_stereo_logging[C(/F)=C/F-2]",
"tests/test_read_smiles.py::test_stereo_logging[NC(Br)=[C@]=C(O)C-1]"
] |
[
"tests/test_read_smiles.py::test_read_smiles[CCCC-node_data0-edge_data0-False]",
"tests/test_read_smiles.py::test_read_smiles[CCC(CC)CC-node_data1-edge_data1-False]",
"tests/test_read_smiles.py::test_read_smiles[C=C-node_data2-edge_data2-False]",
"tests/test_read_smiles.py::test_read_smiles[C1CC1-node_data3-edge_data3-False]",
"tests/test_read_smiles.py::test_read_smiles[C%11CC%11-node_data4-edge_data4-False]",
"tests/test_read_smiles.py::test_read_smiles[c1ccccc1-node_data5-edge_data5-False]",
"tests/test_read_smiles.py::test_read_smiles[C1CC=1-node_data6-edge_data6-False]",
"tests/test_read_smiles.py::test_read_smiles[C=1CC=1-node_data7-edge_data7-False]",
"tests/test_read_smiles.py::test_read_smiles[C=1CC1-node_data8-edge_data8-False]",
"tests/test_read_smiles.py::test_read_smiles[C%11CC=%11-node_data9-edge_data9-False]",
"tests/test_read_smiles.py::test_read_smiles[OS(=O)(=S)O-node_data10-edge_data10-False]",
"tests/test_read_smiles.py::test_read_smiles[C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C))))))))))))))))))))C-node_data11-edge_data11-False]",
"tests/test_read_smiles.py::test_read_smiles[N1CC2CCCC2CC1-node_data12-edge_data12-False]",
"tests/test_read_smiles.py::test_read_smiles[c1ccc2ccccc2c1-node_data13-edge_data13-True]",
"tests/test_read_smiles.py::test_read_smiles[C12(CCCCC1)CCCCC2-node_data14-edge_data14-False]",
"tests/test_read_smiles.py::test_read_smiles[[H]C([H])([H])[H]-node_data15-edge_data15-False]",
"tests/test_read_smiles.py::test_read_smiles[[H]O([H])[H]O([H])[H]-node_data16-edge_data16-False]",
"tests/test_read_smiles.py::test_read_smiles[[H]=C-node_data17-edge_data17-False]",
"tests/test_read_smiles.py::test_read_smiles[[H][H]-node_data18-edge_data18-False]",
"tests/test_read_smiles.py::test_read_smiles[[13CH3-1:1]-node_data19-edge_data19-False]",
"tests/test_read_smiles.py::test_read_smiles[[013CH3-:1]-node_data20-edge_data20-False]",
"tests/test_read_smiles.py::test_read_smiles[[Cu++]-node_data21-edge_data21-False]",
"tests/test_read_smiles.py::test_read_smiles[[Cu+2]-node_data22-edge_data22-False]",
"tests/test_read_smiles.py::test_read_smiles[[Uuo+4]-node_data23-edge_data23-False]",
"tests/test_read_smiles.py::test_read_smiles[[2H][CH2]-node_data24-edge_data24-False]",
"tests/test_read_smiles.py::test_read_smiles[*-node_data25-edge_data25-False]",
"tests/test_read_smiles.py::test_read_smiles[[*--]-node_data26-edge_data26-False]",
"tests/test_read_smiles.py::test_read_smiles[[*-]-node_data27-edge_data27-False]",
"tests/test_read_smiles.py::test_read_smiles[[H+]-node_data28-edge_data28-False]",
"tests/test_read_smiles.py::test_read_smiles[[cH-1]1[cH-1][cH-1]1-node_data29-edge_data29-False]",
"tests/test_read_smiles.py::test_read_smiles[[Rh-](Cl)(Cl)(Cl)(Cl)$[Rh-](Cl)(Cl)(Cl)Cl-node_data30-edge_data30-False]",
"tests/test_read_smiles.py::test_read_smiles[c1occc1-node_data31-edge_data31-True]",
"tests/test_read_smiles.py::test_read_smiles[[O-]C(O)CN-node_data32-edge_data32-True]",
"tests/test_read_smiles.py::test_read_smiles[[*+]1[*][*]1-node_data33-edge_data33-False]",
"tests/test_read_smiles.py::test_read_smiles[N1[*][*]1-node_data34-edge_data34-False]",
"tests/test_read_smiles.py::test_invalid_smiles[[CL-]-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[[HH]-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[c1ccccc2-KeyError]",
"tests/test_read_smiles.py::test_invalid_smiles[C%1CC1-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[c1c1CC-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[CC11C-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[1CCC1-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[cccccc-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[C=1CC-1-ValueError]",
"tests/test_read_smiles.py::test_stereo_logging[c1ccccc1-0]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-06-25 12:05:35+00:00
|
apache-2.0
| 4,478 |
|
pckroon__pysmiles-24
|
diff --git a/pysmiles/smiles_helper.py b/pysmiles/smiles_helper.py
index fb59432..86b05e3 100644
--- a/pysmiles/smiles_helper.py
+++ b/pysmiles/smiles_helper.py
@@ -27,7 +27,7 @@ import networkx as nx
LOGGER = logging.getLogger(__name__)
ISOTOPE_PATTERN = r'(?P<isotope>[\d]+)?'
-ELEMENT_PATTERN = r'(?P<element>b|c|n|o|s|p|\*|[A-Z][a-z]{0,2})'
+ELEMENT_PATTERN = r'(?P<element>b|c|n|o|s|p|as|se|\*|[A-Z][a-z]{0,2})'
STEREO_PATTERN = r'(?P<stereo>@|@@|@TH[1-2]|@AL[1-2]|@SP[1-3]|@OH[\d]{1,2}|'\
r'@TB[\d]{1,2})?'
HCOUNT_PATTERN = r'(?P<hcount>H[\d]?)?'
|
pckroon/pysmiles
|
a3d0d19e14834218059fed5cc3a9e4c561c35ce4
|
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 33e1666..aa08857 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -19,7 +19,7 @@ jobs:
strategy:
matrix:
- py_version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
+ py_version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
steps:
- uses: actions/checkout@v2
diff --git a/tests/test_read_smiles.py b/tests/test_read_smiles.py
index d85e718..73196d3 100644
--- a/tests/test_read_smiles.py
+++ b/tests/test_read_smiles.py
@@ -427,6 +427,52 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
(4, 8, {'order': 1})],
True
),
+ (
+ 'c1[asH]ccc1',
+ [(0, {'charge': 0, 'element': 'C', 'aromatic': True}),
+ (1, {'charge': 0, 'element': 'As', 'aromatic': True}),
+ (2, {'charge': 0, 'element': 'C', 'aromatic': True}),
+ (3, {'charge': 0, 'element': 'C', 'aromatic': True}),
+ (4, {'charge': 0, 'element': 'C', 'aromatic': True}),
+ (5, {'charge': 0, 'element': 'H', 'aromatic': False}),
+ (6, {'charge': 0, 'element': 'H', 'aromatic': False}),
+ (7, {'charge': 0, 'element': 'H', 'aromatic': False}),
+ (8, {'charge': 0, 'element': 'H', 'aromatic': False}),
+ (9, {'charge': 0, 'element': 'H', 'aromatic': False})],
+ [(0, 1, {'order': 1.5}),
+ (0, 4, {'order': 1.5}),
+ (0, 5, {'order': 1}),
+ (1, 2, {'order': 1.5}),
+ (2, 3, {'order': 1.5}),
+ (2, 6, {'order': 1}),
+ (3, 4, {'order': 1.5}),
+ (3, 7, {'order': 1}),
+ (4, 8, {'order': 1}),
+ (1, 9, {'order': 1}),],
+ True
+ ),
+ (
+ 'c1[se]ccc1',
+ [(0, {'charge': 0, 'element': 'C', 'aromatic': True}),
+ (1, {'charge': 0, 'element': 'Se', 'aromatic': True}),
+ (2, {'charge': 0, 'element': 'C', 'aromatic': True}),
+ (3, {'charge': 0, 'element': 'C', 'aromatic': True}),
+ (4, {'charge': 0, 'element': 'C', 'aromatic': True}),
+ (5, {'charge': 0, 'element': 'H', 'aromatic': False}),
+ (6, {'charge': 0, 'element': 'H', 'aromatic': False}),
+ (7, {'charge': 0, 'element': 'H', 'aromatic': False}),
+ (8, {'charge': 0, 'element': 'H', 'aromatic': False})],
+ [(0, 1, {'order': 1.5}),
+ (0, 4, {'order': 1.5}),
+ (0, 5, {'order': 1}),
+ (1, 2, {'order': 1.5}),
+ (2, 3, {'order': 1.5}),
+ (2, 6, {'order': 1}),
+ (3, 4, {'order': 1.5}),
+ (3, 7, {'order': 1}),
+ (4, 8, {'order': 1})],
+ True
+ ),
(
'[O-]C(O)CN',
[(0, {'charge': -1, 'element': 'O', 'aromatic': False}),
|
`[se]` and `[as]` are not properly recognized
While `se` and `as` can appear as aromatics if they are placed inside square brackets, however it seems this library fails to parse them
|
0.0
|
a3d0d19e14834218059fed5cc3a9e4c561c35ce4
|
[
"tests/test_read_smiles.py::test_read_smiles[c1[asH]ccc1-node_data32-edge_data32-True]",
"tests/test_read_smiles.py::test_read_smiles[c1[se]ccc1-node_data33-edge_data33-True]"
] |
[
"tests/test_read_smiles.py::test_read_smiles[CCCC-node_data0-edge_data0-False]",
"tests/test_read_smiles.py::test_read_smiles[CCC(CC)CC-node_data1-edge_data1-False]",
"tests/test_read_smiles.py::test_read_smiles[C=C-node_data2-edge_data2-False]",
"tests/test_read_smiles.py::test_read_smiles[C1CC1-node_data3-edge_data3-False]",
"tests/test_read_smiles.py::test_read_smiles[C%11CC%11-node_data4-edge_data4-False]",
"tests/test_read_smiles.py::test_read_smiles[c1ccccc1-node_data5-edge_data5-False]",
"tests/test_read_smiles.py::test_read_smiles[C1CC=1-node_data6-edge_data6-False]",
"tests/test_read_smiles.py::test_read_smiles[C=1CC=1-node_data7-edge_data7-False]",
"tests/test_read_smiles.py::test_read_smiles[C=1CC1-node_data8-edge_data8-False]",
"tests/test_read_smiles.py::test_read_smiles[C%11CC=%11-node_data9-edge_data9-False]",
"tests/test_read_smiles.py::test_read_smiles[OS(=O)(=S)O-node_data10-edge_data10-False]",
"tests/test_read_smiles.py::test_read_smiles[C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C(C))))))))))))))))))))C-node_data11-edge_data11-False]",
"tests/test_read_smiles.py::test_read_smiles[N1CC2CCCC2CC1-node_data12-edge_data12-False]",
"tests/test_read_smiles.py::test_read_smiles[c1ccc2ccccc2c1-node_data13-edge_data13-True]",
"tests/test_read_smiles.py::test_read_smiles[C12(CCCCC1)CCCCC2-node_data14-edge_data14-False]",
"tests/test_read_smiles.py::test_read_smiles[[H]C([H])([H])[H]-node_data15-edge_data15-False]",
"tests/test_read_smiles.py::test_read_smiles[[H]O([H])[H]O([H])[H]-node_data16-edge_data16-False]",
"tests/test_read_smiles.py::test_read_smiles[[H]=C-node_data17-edge_data17-False]",
"tests/test_read_smiles.py::test_read_smiles[[H][H]-node_data18-edge_data18-False]",
"tests/test_read_smiles.py::test_read_smiles[[13CH3-1:1]-node_data19-edge_data19-False]",
"tests/test_read_smiles.py::test_read_smiles[[013CH3-:1]-node_data20-edge_data20-False]",
"tests/test_read_smiles.py::test_read_smiles[[Cu++]-node_data21-edge_data21-False]",
"tests/test_read_smiles.py::test_read_smiles[[Cu+2]-node_data22-edge_data22-False]",
"tests/test_read_smiles.py::test_read_smiles[[Uuo+4]-node_data23-edge_data23-False]",
"tests/test_read_smiles.py::test_read_smiles[[2H][CH2]-node_data24-edge_data24-False]",
"tests/test_read_smiles.py::test_read_smiles[*-node_data25-edge_data25-False]",
"tests/test_read_smiles.py::test_read_smiles[[*--]-node_data26-edge_data26-False]",
"tests/test_read_smiles.py::test_read_smiles[[*-]-node_data27-edge_data27-False]",
"tests/test_read_smiles.py::test_read_smiles[[H+]-node_data28-edge_data28-False]",
"tests/test_read_smiles.py::test_read_smiles[[cH-1]1[cH-1][cH-1]1-node_data29-edge_data29-False]",
"tests/test_read_smiles.py::test_read_smiles[[Rh-](Cl)(Cl)(Cl)(Cl)$[Rh-](Cl)(Cl)(Cl)Cl-node_data30-edge_data30-False]",
"tests/test_read_smiles.py::test_read_smiles[c1occc1-node_data31-edge_data31-True]",
"tests/test_read_smiles.py::test_read_smiles[[O-]C(O)CN-node_data34-edge_data34-True]",
"tests/test_read_smiles.py::test_read_smiles[[*+]1[*][*]1-node_data35-edge_data35-False]",
"tests/test_read_smiles.py::test_read_smiles[N1[*][*]1-node_data36-edge_data36-False]",
"tests/test_read_smiles.py::test_invalid_smiles[[CL-]-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[[HH]-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[c1ccccc2-KeyError]",
"tests/test_read_smiles.py::test_invalid_smiles[C%1CC1-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[c1c1CC-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[CC11C-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[1CCC1-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[cccccc-ValueError]",
"tests/test_read_smiles.py::test_invalid_smiles[C=1CC-1-ValueError]",
"tests/test_read_smiles.py::test_stereo_logging[F/C=C/F-2]",
"tests/test_read_smiles.py::test_stereo_logging[C(\\\\F)=C/F-2]",
"tests/test_read_smiles.py::test_stereo_logging[F\\\\C=C/F-2]",
"tests/test_read_smiles.py::test_stereo_logging[C(/F)=C/F-2]",
"tests/test_read_smiles.py::test_stereo_logging[NC(Br)=[C@]=C(O)C-1]",
"tests/test_read_smiles.py::test_stereo_logging[c1ccccc1-0]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-02 10:22:23+00:00
|
apache-2.0
| 4,479 |
|
pckroon__pysmiles-28
|
diff --git a/pysmiles/read_smiles.py b/pysmiles/read_smiles.py
index f4bf6e0..79e9634 100644
--- a/pysmiles/read_smiles.py
+++ b/pysmiles/read_smiles.py
@@ -103,9 +103,12 @@ def read_smiles(smiles, explicit_hydrogen=False, zero_order_bonds=True,
The SMILES string to parse. Should conform to the OpenSMILES
specification.
explicit_hydrogen : bool
- Whether hydrogens should be explicit nodes in the outout graph, or be
+ Whether hydrogens should be explicit nodes in the output graph, or be
implicit in 'hcount' attributes.
- reinterprit_aromatic : bool
+ zero_order_bonds : bool
+ Whether zero-order bonds (".") should be added as edges with an order of
+ 0.
+ reinterpret_aromatic : bool
Whether aromaticity should be determined from the created molecule,
instead of taken from the SMILES string.
diff --git a/pysmiles/smiles_helper.py b/pysmiles/smiles_helper.py
index 86b05e3..0c13c19 100644
--- a/pysmiles/smiles_helper.py
+++ b/pysmiles/smiles_helper.py
@@ -141,8 +141,8 @@ def format_atom(molecule, node_key, default_element='*'):
if aromatic:
name = name.lower()
- if (stereo is None and isotope == '' and charge == 0 and default_h and
- class_ == '' and name.lower() in 'b c n o p s se as *'.split()):
+ if (stereo is None and isotope == '' and charge == 0 and default_h and class_ == '' and
+ (name.lower() in 'b c n o p s *'.split() or name in 'F Cl Br I'.split())):
return name
if hcount:
diff --git a/pysmiles/write_smiles.py b/pysmiles/write_smiles.py
index ea869dc..4263701 100644
--- a/pysmiles/write_smiles.py
+++ b/pysmiles/write_smiles.py
@@ -18,11 +18,13 @@ Exposes functionality for writing SMILES strings
"""
from collections import defaultdict
+import logging
import networkx as nx
from .smiles_helper import remove_explicit_hydrogens, format_atom
+LOGGER = logging.getLogger(__name__)
def _get_ring_marker(used_markers):
"""
@@ -74,10 +76,10 @@ def _write_edge_symbol(molecule, n_idx, n_jdx):
return cross_aromatic or not (aromatic_bond or single_bond)
-def write_smiles(molecule, default_element='*', start=None):
+def write_smiles_component(molecule, default_element='*', start=None):
"""
Creates a SMILES string describing `molecule` according to the OpenSMILES
- standard.
+ standard. `molecule` should be a single connected component.
Parameters
----------
@@ -187,3 +189,43 @@ def write_smiles(molecule, default_element='*', start=None):
smiles += ')' * branch_depth
return smiles
+
+def write_smiles(molecule, default_element='*', start=None):
+ """
+ Creates a SMILES string describing `molecule` according to the OpenSMILES
+ standard. If `molecule` consists of multiple disconnected components their
+ corresponding SMILES string will be joined by zero-order bonds (".").
+
+ Parameters
+ ----------
+ molecule : nx.Graph
+ The molecule for which a SMILES string should be generated.
+ default_element : str
+ The element to write if the attribute is missing for a node.
+ start : Hashable
+ The atom at which the depth first traversal of the molecule should
+ start. A sensible one is chosen: preferably a terminal heteroatom.
+
+ Returns
+ -------
+ str
+ The SMILES string describing `molecule`.
+ """
+ smiles = []
+ components = list(nx.connected_components(molecule))
+ try:
+ components = sorted(components, key=lambda c: sorted(c))
+ except TypeError:
+ pass
+ for nodes in components:
+ if start is not None and start in nodes:
+ start_ = start
+ else:
+ start_ = None
+ smiles.append(write_smiles_component(molecule.subgraph(nodes),
+ default_element=default_element, start=start_))
+ if len(smiles) > 1:
+ LOGGER.info('Be aware the specified molecule is disconnected and '
+ 'consists of %d connected components.', len(smiles))
+ return '.'.join(smiles)
+
|
pckroon/pysmiles
|
e557bbba1815dc2d3279513b9f71a2603c7f34a0
|
diff --git a/tests/test_write_smiles.py b/tests/test_write_smiles.py
index 1b5fa97..373b037 100644
--- a/tests/test_write_smiles.py
+++ b/tests/test_write_smiles.py
@@ -19,7 +19,7 @@ from pysmiles import write_smiles, read_smiles
from pysmiles.testhelper import assertEqualGraphs, make_mol
[email protected]('node_data, edge_data, expl_h', (
[email protected]('node_data, edge_data, kwargs', (
(
[(0, {'element': 'C', 'charge': 0, 'aromatic': False, 'hcount': 3}),
(1, {'element': 'C', 'charge': 0, 'aromatic': False, 'hcount': 2}),
@@ -28,7 +28,7 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
[(0, 1, {'order': 1}),
(1, 2, {'order': 1}),
(2, 3, {'order': 1})],
- False
+ dict(explicit_hydrogen=False, zero_order_bonds=True, reinterpret_aromatic=True),
),
(
[(0, {'element': 'C', 'charge': 0, 'aromatic': False, 'hcount': 3}),
@@ -38,7 +38,7 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
[(0, 1, {'order': 1}),
(1, 2, {'order': 1}),
(1, 3, {'order': 1})],
- False
+ dict(explicit_hydrogen=False, zero_order_bonds=True, reinterpret_aromatic=True),
),
(
[(0, {'element': 'C', 'charge': 0, 'aromatic': False, 'hcount': 3}),
@@ -50,7 +50,7 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
(1, 2, {'order': 1}),
(2, 3, {'order': 1}),
(3, 4, {'order': 2})],
- False
+ dict(explicit_hydrogen=False, zero_order_bonds=True, reinterpret_aromatic=True),
),
(
[(0, {'element': 'C', 'charge': 0, 'aromatic': True, 'hcount': 1}),
@@ -67,7 +67,7 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
(4, 5, {'order': 1.5}),
(5, 0, {'order': 1.5}),
(3, 6, {'order': 1})],
- False
+ dict(explicit_hydrogen=False, zero_order_bonds=True, reinterpret_aromatic=False),
),
(
[(0, {'element': 'C', 'charge': 0, 'aromatic': True, 'hcount': 1}),
@@ -84,7 +84,7 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
(4, 5, {'order': 1.5}),
(5, 0, {'order': 1.5}),
(3, 6, {'order': 2})],
- False
+ dict(explicit_hydrogen=False, zero_order_bonds=True, reinterpret_aromatic=False),
),
(
[(0, {'element': 'C', 'charge': 0, 'aromatic': True, 'hcount': 1}),
@@ -108,7 +108,7 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
(8, 9, {'order': 1.5}),
(9, 0, {'order': 1.5}),
(2, 7, {'order': 1.5})],
- False
+ dict(explicit_hydrogen=False, zero_order_bonds=True, reinterpret_aromatic=True),
),
(
[(0, {'element': 'C', 'charge': -1, 'aromatic': False, 'hcount': 3}),
@@ -118,7 +118,7 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
[(0, 1, {'order': 1}),
(1, 2, {'order': 1}),
(2, 3, {'order': 1})],
- False
+ dict(explicit_hydrogen=False, zero_order_bonds=True, reinterpret_aromatic=True),
),
(
[(0, {'element': 'C', 'charge': -1, 'aromatic': False, 'hcount': 3}),
@@ -128,11 +128,34 @@ from pysmiles.testhelper import assertEqualGraphs, make_mol
[(0, 1, {'order': 1}),
(1, 2, {'order': 1}),
(2, 3, {'order': 1})],
- False
+ dict(explicit_hydrogen=False, zero_order_bonds=True, reinterpret_aromatic=True),
+ ),
+ (
+ [(0, {'element': 'C', 'charge': 0, 'aromatic': False, 'hcount': 4}),
+ (1, {'element': 'C', 'charge': 0, 'aromatic': False, 'hcount': 4})],
+ [],
+ dict(explicit_hydrogen=False, zero_order_bonds=False, reinterpret_aromatic=True),
+ ),
+ (
+ [('a', {'element': 'C', 'charge': 0, 'aromatic': False, 'hcount': 4}),
+ (1, {'element': 'C', 'charge': 0, 'aromatic': False, 'hcount': 4})],
+ [],
+ dict(explicit_hydrogen=False, zero_order_bonds=False, reinterpret_aromatic=True),
+ ),
+ (
+ [(0, {'element': 'Se', 'charge': 0, 'aromatic': False, 'hcount': 0})],
+ [],
+ False
+ ),
+ (
+ [(0, {'element': 'As', 'charge': 0, 'aromatic': False, 'hcount': 0})],
+ [],
+ False
),
))
-def test_write_smiles(node_data, edge_data, expl_h):
+def test_write_smiles(node_data, edge_data, kwargs):
mol = make_mol(node_data, edge_data)
smiles = write_smiles(mol)
- found = read_smiles(smiles, explicit_hydrogen=expl_h, reinterpret_aromatic=False)
+ print(smiles)
+ found = read_smiles(smiles, **kwargs)
assertEqualGraphs(mol, found)
|
Inconsistent writing and reading of mono-atomic smiles for Se and As
If we have a graph featuring a single non-organic atom like Se, this will be outputted by the smiles writer as 'Se'. According to smiles rules, I would have expected '[Se]'. And then, if we again read in the smile 'Se' with the smiles reader, it will fail and output a graph featuring 'S', because it relies on the brackets to recognise non-organic elements.
I don't know if this is an issue with the smiles reader or writer, but they are inconsistent.
|
0.0
|
e557bbba1815dc2d3279513b9f71a2603c7f34a0
|
[
"tests/test_write_smiles.py::test_write_smiles[node_data9-edge_data9-kwargs9]"
] |
[
"tests/test_write_smiles.py::test_write_smiles[node_data0-edge_data0-kwargs0]",
"tests/test_write_smiles.py::test_write_smiles[node_data1-edge_data1-kwargs1]",
"tests/test_write_smiles.py::test_write_smiles[node_data2-edge_data2-kwargs2]",
"tests/test_write_smiles.py::test_write_smiles[node_data3-edge_data3-kwargs3]",
"tests/test_write_smiles.py::test_write_smiles[node_data4-edge_data4-kwargs4]",
"tests/test_write_smiles.py::test_write_smiles[node_data5-edge_data5-kwargs5]",
"tests/test_write_smiles.py::test_write_smiles[node_data6-edge_data6-kwargs6]",
"tests/test_write_smiles.py::test_write_smiles[node_data7-edge_data7-kwargs7]",
"tests/test_write_smiles.py::test_write_smiles[node_data8-edge_data8-kwargs8]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-09 11:07:50+00:00
|
apache-2.0
| 4,480 |
|
pcubillos__bibmanager-111
|
diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index f4cd8cb..e4a7740 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -2671,3 +2671,15 @@ Bumped bibmanager version to 1.4.5.
Updated bibtex entry in docs and README.
+
+***** Sun Oct 23 13:06:26 CEST 2022 *****
+
+Updated bm.filter_field() to detect when entries with same isbn
+have different dois (and allow that case).
+Updated tests with new edge cases.
+
+*****
+
+Bumped bibmanager to version 1.4.6.
+When merged, this resolves #108
+
diff --git a/bibmanager/bib_manager/bib_manager.py b/bibmanager/bib_manager/bib_manager.py
index 76d5035..86ed497 100644
--- a/bibmanager/bib_manager/bib_manager.py
+++ b/bibmanager/bib_manager/bib_manager.py
@@ -52,8 +52,10 @@ from ..__init__ import __version__
# Some constant definitions:
lexer = prompt_toolkit.lexers.PygmentsLexer(BibTeXLexer)
-months = {"jan":1, "feb":2, "mar":3, "apr": 4, "may": 5, "jun":6,
- "jul":7, "aug":8, "sep":9, "oct":10, "nov":11, "dec":12}
+months = {
+ "jan":1, "feb":2, "mar":3, "apr": 4, "may": 5, "jun":6,
+ "jul":7, "aug":8, "sep":9, "oct":10, "nov":11, "dec":12,
+}
class Bib(object):
@@ -184,7 +186,8 @@ class Bib(object):
last, first, von, jr = None, None, None, None
self.sort_author = u.Sort_author(
- last, first, von, jr, self.year, self.month)
+ last, first, von, jr, self.year, self.month,
+ )
def update_content(self, other):
"""Update the bibtex content of self with that of other."""
@@ -336,7 +339,8 @@ class Bib(object):
s.von == o.von and
s.jr == o.jr and
s.year == o.year and
- s.month == o.month)
+ s.month == o.month
+ )
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
@@ -502,7 +506,6 @@ def display_list(bibs, verb=-1):
output=create_output(sys.stdout))
-
def remove_duplicates(bibs, field):
"""
Look for duplicates (within a same list of entries) by field and
@@ -556,13 +559,15 @@ def remove_duplicates(bibs, field):
if field == 'isbn':
dois = [
bibs[idx].doi if bibs[idx].doi is not None else ""
- for idx in indices]
- u_doi, doi_inv, doi_counts = np.unique(
- dois, return_inverse=True, return_counts=True)
+ for idx in indices
+ ]
+ u_doi, doi_counts = np.unique(dois, return_counts=True)
single_dois = u_doi[doi_counts==1]
indices = [
- idx for idx,doi in zip(indices,dois)
- if doi not in single_dois]
+ idx
+ for idx,doi in zip(indices,dois)
+ if doi not in single_dois
+ ]
nbibs = len(indices)
if nbibs <= 1:
continue
@@ -611,6 +616,20 @@ def filter_field(bibs, new, field, take):
if getattr(bib,field) is None or getattr(bib,field) not in fields:
continue
idx = fields.index(getattr(bib,field))
+ # There could be entries with same ISBN but different DOI:
+ if field == 'isbn':
+ new_doi = '' if bib.doi is None else bib.doi
+ dois = [
+ '' if bib.doi is None else bib.doi
+ for bib in bibs
+ ]
+ really_isbn_duplicates = [
+ isbn == bib.isbn and doi == new_doi
+ for isbn,doi in zip(fields,dois)
+ ]
+ if not np.any(really_isbn_duplicates):
+ continue
+ idx = np.where(really_isbn_duplicates)[0][0]
# Replace if duplicated and new has newer bibcode:
if bib.published() > bibs[idx].published() or take == 'new':
bibs[idx].update_content(bib)
diff --git a/bibmanager/version.py b/bibmanager/version.py
index 3bedc93..3837de7 100644
--- a/bibmanager/version.py
+++ b/bibmanager/version.py
@@ -2,4 +2,4 @@
# bibmanager is open-source software under the MIT license (see LICENSE).
# bibmanager Version:
-__version__ = '1.4.5'
+__version__ = '1.4.6'
|
pcubillos/bibmanager
|
d301dfbad690af4f4744fd5afee16904729e7216
|
diff --git a/tests/test_bib_manager.py b/tests/test_bib_manager.py
index b2aaf5c..1c86a71 100644
--- a/tests/test_bib_manager.py
+++ b/tests/test_bib_manager.py
@@ -320,8 +320,7 @@ def test_Bib_warning_year():
"Bad year format value '200X' for entry 'JonesEtal2001scipy'"
[email protected]('month',
- ['15', 'tuesday',])
[email protected]('month', ['15', 'tuesday',])
def test_Bib_warning_month(month):
e = '''@Misc{JonesEtal2001scipy,
author = {Eric Jones},
@@ -949,6 +948,72 @@ def test_merge_duplicate_title_add(bibs, mock_init_sample, mock_input):
assert bibs['slipher_guy'] in loaded_bibs
+# bm.filter_field() is actually getting the job done here
[email protected]('doi', ['', 'doi = {10.1007/978-3-319-21846-5}'])
+def test_merge_duplicate_isbn_same_doi(doi):
+ base_bibs = [bm.Bib(
+ """@incollection{OConnor2017,
+ title = "{The Core-Collapse Supernova-Black Hole Connection}",
+ author = "O'Connor, Evan",
+ year = "2016",
+ booktitle = "Handbook of Supernovae",
+ isbn = "9783319218465",
+ DOI
+ }""".replace('DOI', doi)
+ )]
+ new_bibs = [bm.Bib(
+ """@incollection{Alsabti2016,
+ title = {{Supernovae and Supernova Remnants: The Big Picture in Low Resolution}},
+ author = {Alsabti, Athem W. and Murdin, Paul},
+ year = {2017},
+ booktitle = {Handbook of Supernovae},
+ isbn = {9783319218465},
+ DOI
+ }""".replace('DOI', doi)
+ )]
+ merged = bm.merge(base=base_bibs, new=new_bibs)
+
+ assert len(merged) == 1
+ assert merged[0] == base_bibs[0]
+
+
+# bm.filter_field() is actually getting the job done here
[email protected](
+ 'dois',
+ [
+ ('doi = "10.1007/978-3-319-20794-0"', 'doi = {10.1007/978-3-319-21846-5}'),
+ ('doi = "10.1007/978-3-319-20794-0"', ''),
+ ('', 'doi = {10.1007/978-3-319-21846-5}')
+ ]
+)
+def test_merge_duplicate_isbn_different_doi(dois):
+ base_bibs = [bm.Bib(
+ """@incollection{OConnor2017,
+ title = "{The Core-Collapse Supernova-Black Hole Connection}",
+ author = "O'Connor, Evan",
+ year = "2016",
+ booktitle = "Handbook of Supernovae",
+ isbn = "9783319218465",
+ DOI
+ }""".replace('DOI', dois[0])
+ )]
+ new_bibs = [bm.Bib(
+ """@incollection{Alsabti2016,
+ title = {{Supernovae and Supernova Remnants: The Big Picture in Low Resolution}},
+ author = {Alsabti, Athem W. and Murdin, Paul},
+ year = {2017},
+ booktitle = {Handbook of Supernovae},
+ isbn = {9783319218465},
+ DOI
+ }""".replace('DOI', dois[1])
+ )]
+ merged = bm.merge(base=base_bibs, new=new_bibs)
+
+ assert len(merged) == 2
+ assert merged[0] == new_bibs[0]
+ assert merged[1] == base_bibs[0]
+
+
def test_duplicate_isbn_different_doi(capfd, entries):
text = entries['isbn_doi1'] + entries['isbn_doi2']
bibs = bm.read_file(text=text)
|
Bug: Regression from #90 with False duplicates detected for Incollection book entries
Issue #89 resolved by PR #90 dealt with false duplicate detections, which was merged into version 1.3.4. I am running version 1.4.5 installed via conda-forge and I am running into the same issue.
Here is a false double detection. The ISBNs are the same, but the DOIs are different.
What's expected here is that bibmanager does not complain during a merge with these two entries, or other similar entries.
Note: This is possibly a DOI parsing issue as these DOIs have some Latex escape sequences in them.
```DATABASE:
@incollection{OConnor2017,
author = "O’Connor, Evan",
editor = "Alsabti, A. W. and Murdin, P",
title = "{The Core-Collapse Supernova-Black Hole Connection}",
year = "2016",
booktitle = "Handbook of Supernovae",
pages = "1--18",
publisher = "Springer",
url = "https://doi.org/10.1007/978-3-319-20794-0\_129-1 http://link.springer.com/10.1007/978-3-319-20794-0\_129-1",
address = "Cham",
isbn = "9783319218465",
doi = "10.1007/978-3-319-20794-0{\\_}129-1"
}
NEW:
@incollection{Alsabti2016,
title = {{Supernovae and Supernova Remnants: The Big Picture in Low Resolution}},
year = {2017},
booktitle = {Handbook of Supernovae},
author = {Alsabti, Athem W. and Murdin, Paul},
editor = {Alsabti, A.~W. and Murdin, P},
pages = {3--28},
publisher = {Springer, Cham},
isbn = {9783319218465},
doi = {10.1007/978-3-319-21846-5{\_}1},
keywords = {Physics}
```
|
0.0
|
d301dfbad690af4f4744fd5afee16904729e7216
|
[
"tests/test_bib_manager.py::test_merge_duplicate_isbn_different_doi[dois0]",
"tests/test_bib_manager.py::test_merge_duplicate_isbn_different_doi[dois1]",
"tests/test_bib_manager.py::test_merge_duplicate_isbn_different_doi[dois2]"
] |
[
"tests/test_bib_manager.py::test_Bib_minimal",
"tests/test_bib_manager.py::test_Bib_ads_entry",
"tests/test_bib_manager.py::test_Bib_update_content_bib_info",
"tests/test_bib_manager.py::test_Bib_update_content_keep_meta",
"tests/test_bib_manager.py::test_Bib_update_content_update_meta",
"tests/test_bib_manager.py::test_Bib_mismatched_braces_raise",
"tests/test_bib_manager.py::test_Bib_update_key",
"tests/test_bib_manager.py::test_Bib_contains",
"tests/test_bib_manager.py::test_Bib_published_peer_reviewed",
"tests/test_bib_manager.py::test_Bib_published_arxiv",
"tests/test_bib_manager.py::test_Bib_published_non_ads",
"tests/test_bib_manager.py::test_Bib_month[-13]",
"tests/test_bib_manager.py::test_Bib_month[month",
"tests/test_bib_manager.py::test_Bib_lower_than_no_author",
"tests/test_bib_manager.py::test_Bib_lower_than_both_no_author",
"tests/test_bib_manager.py::test_Bib_lower_than_no_year",
"tests/test_bib_manager.py::test_Bib_equal_no_author",
"tests/test_bib_manager.py::test_Bib_equal_both_no_author",
"tests/test_bib_manager.py::test_Bib_not_equal_both_no_author",
"tests/test_bib_manager.py::test_Bib_not_equal_no_year",
"tests/test_bib_manager.py::test_Bib_equal_no_year",
"tests/test_bib_manager.py::test_Bib_meta",
"tests/test_bib_manager.py::test_Bib_warning_year",
"tests/test_bib_manager.py::test_Bib_warning_month[15]",
"tests/test_bib_manager.py::test_Bib_warning_month[tuesday]",
"tests/test_bib_manager.py::test_Bib_warning_authors_comma_typo",
"tests/test_bib_manager.py::test_Bib_warning_authors_missing_and",
"tests/test_bib_manager.py::test_display_bibs",
"tests/test_bib_manager.py::test_display_bibs_meta_not_shown",
"tests/test_bib_manager.py::test_display_bibs_meta_shown",
"tests/test_bib_manager.py::test_display_list_no_verb",
"tests/test_bib_manager.py::test_display_list_verb_neg",
"tests/test_bib_manager.py::test_display_list_verb_zero",
"tests/test_bib_manager.py::test_display_list_no_author",
"tests/test_bib_manager.py::test_display_list_no_year",
"tests/test_bib_manager.py::test_display_list_no_title",
"tests/test_bib_manager.py::test_display_list_verb_one",
"tests/test_bib_manager.py::test_display_list_verb_two",
"tests/test_bib_manager.py::test_display_list_no_arxiv",
"tests/test_bib_manager.py::test_display_list_no_ads",
"tests/test_bib_manager.py::test_display_list_verb_full",
"tests/test_bib_manager.py::test_remove_duplicates_no_duplicates",
"tests/test_bib_manager.py::test_remove_duplicates_identical",
"tests/test_bib_manager.py::test_remove_duplicates_diff_published",
"tests/test_bib_manager.py::test_remove_duplicates_query[mock_input0]",
"tests/test_bib_manager.py::test_filter_field_no_conflict",
"tests/test_bib_manager.py::test_filter_field_take_published",
"tests/test_bib_manager.py::test_filter_field_take_old",
"tests/test_bib_manager.py::test_filter_field_take_new",
"tests/test_bib_manager.py::test_filter_field_take_ask[mock_input0]",
"tests/test_bib_manager.py::test_filter_field_take_ask2[mock_input0]",
"tests/test_bib_manager.py::test_read_file_bibfile",
"tests/test_bib_manager.py::test_read_file_text",
"tests/test_bib_manager.py::test_read_file_single_line_entry",
"tests/test_bib_manager.py::test_read_file_ignore_comment",
"tests/test_bib_manager.py::test_read_file_ignore_comment_no_commas",
"tests/test_bib_manager.py::test_read_file_meta",
"tests/test_bib_manager.py::test_read_file_pdf_with_path",
"tests/test_bib_manager.py::test_read_file_pdf_with_bad_path",
"tests/test_bib_manager.py::test_read_file_error_bad_format",
"tests/test_bib_manager.py::test_read_file_error_open_end",
"tests/test_bib_manager.py::test_save",
"tests/test_bib_manager.py::test_load",
"tests/test_bib_manager.py::test_load_filed",
"tests/test_bib_manager.py::test_find_key",
"tests/test_bib_manager.py::test_find_bibcode",
"tests/test_bib_manager.py::test_find_key_bibcode",
"tests/test_bib_manager.py::test_find_key_not_found",
"tests/test_bib_manager.py::test_find_bibcode_not_found",
"tests/test_bib_manager.py::test_find_bibs",
"tests/test_bib_manager.py::test_find_no_arguments",
"tests/test_bib_manager.py::test_get_version_older",
"tests/test_bib_manager.py::test_get_version_no_pickle",
"tests/test_bib_manager.py::test_get_version_existing",
"tests/test_bib_manager.py::test_get_version_filed",
"tests/test_bib_manager.py::test_export_home",
"tests/test_bib_manager.py::test_export_no_overwrite",
"tests/test_bib_manager.py::test_export_meta",
"tests/test_bib_manager.py::test_export_no_meta",
"tests/test_bib_manager.py::test_merge_bibfile",
"tests/test_bib_manager.py::test_merge_bibs",
"tests/test_bib_manager.py::test_merge_no_new",
"tests/test_bib_manager.py::test_merge_base",
"tests/test_bib_manager.py::test_merge_bibs_no_titles",
"tests/test_bib_manager.py::test_merge_duplicate_key_ingnore[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_key_rename[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_title_ignore[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_title_add[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_isbn_same_doi[]",
"tests/test_bib_manager.py::test_merge_duplicate_isbn_same_doi[doi",
"tests/test_bib_manager.py::test_duplicate_isbn_different_doi",
"tests/test_bib_manager.py::test_duplicate_isbn_doi_vs_no_doi",
"tests/test_bib_manager.py::test_duplicate_isbn_same_unknown_doi[mock_input0]",
"tests/test_bib_manager.py::test_init_from_scratch",
"tests/test_bib_manager.py::test_add_entries_dry[mock_prompt0]",
"tests/test_bib_manager.py::test_add_entries[mock_prompt0]",
"tests/test_bib_manager.py::test_search_author_lastname",
"tests/test_bib_manager.py::test_search_author_last_initials",
"tests/test_bib_manager.py::test_search_author_first",
"tests/test_bib_manager.py::test_search_author_multiple",
"tests/test_bib_manager.py::test_search_author_year_title",
"tests/test_bib_manager.py::test_search_title_multiple",
"tests/test_bib_manager.py::test_search_title_entry_without_title",
"tests/test_bib_manager.py::test_search_year_specific",
"tests/test_bib_manager.py::test_search_year_range",
"tests/test_bib_manager.py::test_search_bibcode",
"tests/test_bib_manager.py::test_search_bibcode_utf8",
"tests/test_bib_manager.py::test_search_bibcode_multiple",
"tests/test_bib_manager.py::test_search_key",
"tests/test_bib_manager.py::test_search_key_multiple",
"tests/test_bib_manager.py::test_prompt_search_kw1[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_kw2[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_extra[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_empty_prompt[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_empty_value[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_blank_value[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_double_def[mock_prompt_session0]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-23 11:14:12+00:00
|
mit
| 4,481 |
|
pcubillos__bibmanager-90
|
diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 44ed2ef..a3ea45c 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -2372,3 +2372,26 @@ Propagated typo corrections into the docs.
*****
Updated link of license badge to point to RTD docs.
+
+
+***** Sat Jun 12 19:01:21 CEST 2021 *****
+
+In bm.remove_dubplicates() added additional check for DOI after
+identical ISBN values in case the entries refer to different chapters
+of a same book.
+Added respective tests.
+
+*****
+
+Small tweak to docs (put latest tutorial video first).
+
+*****
+
+Bumped bibmanager to version 1.3.4
+
+*****
+
+Added mock_init to test_duplicate_isbn_same_unknown_doi() tests
+because the code needs the config file initialized to set the screen
+output style.
+
diff --git a/bibmanager/VERSION.py b/bibmanager/VERSION.py
index 1d5038e..c3c2aca 100644
--- a/bibmanager/VERSION.py
+++ b/bibmanager/VERSION.py
@@ -2,4 +2,4 @@
# bibmanager is open-source software under the MIT license (see LICENSE).
# bibmanager Version:
-__version__ = "1.3.3"
+__version__ = "1.3.4"
diff --git a/bibmanager/bib_manager/bib_manager.py b/bibmanager/bib_manager/bib_manager.py
index 20aa2fe..a8a00a3 100644
--- a/bibmanager/bib_manager/bib_manager.py
+++ b/bibmanager/bib_manager/bib_manager.py
@@ -461,6 +461,22 @@ def remove_duplicates(bibs, field):
if nbibs == 1:
continue
+ # If field is isbn, check doi to differentiate chapters from same book:
+ if field == 'isbn':
+ dois = [
+ bibs[idx].doi if bibs[idx].doi is not None else ""
+ for idx in indices]
+ u_doi, doi_inv, doi_counts = np.unique(
+ dois, return_inverse=True, return_counts=True)
+ doi_multis = np.where((doi_counts > 1) & (ubib != ""))[0]
+ single_dois = u_doi[doi_counts==1]
+ indices = [
+ idx for idx,doi in zip(indices,dois)
+ if doi not in single_dois]
+ nbibs = len(indices)
+ if nbibs <= 1:
+ continue
+
# Query the user:
labels = [idx + " ENTRY:\n" for idx in u.ordinal(np.arange(nbibs)+1)]
display_bibs(labels, [bibs[i] for i in indices])
diff --git a/docs/index.rst b/docs/index.rst
index 3a49ea1..63494ca 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -50,13 +50,13 @@ Check out this video tutorial to get started with ``bibmanager``:
.. raw:: html
- <iframe width="720" height="405" src="https://www.youtube.com/embed/WVmhdwVNXOE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+ <iframe width="720" height="405" src="https://www.youtube.com/embed/qewdBx0M8VE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
-And the new features for version 1.3+:
+And this one covering some other features:
.. raw:: html
- <iframe width="720" height="405" src="https://www.youtube.com/embed/qewdBx0M8VE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+ <iframe width="720" height="405" src="https://www.youtube.com/embed/WVmhdwVNXOE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
.. _team:
|
pcubillos/bibmanager
|
0b465d2b83a889c09e1a668744f551ec01156882
|
diff --git a/tests/conftest.py b/tests/conftest.py
index 501e67a..e6c810b 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -233,20 +233,58 @@ archivePrefix = "arXiv",
publisher={AIP Publishing}
}"""
+ isbn_doi1 = """
+@INBOOK{2018haex.bookE.116P,
+ author = {{Parmentier}, Vivien and {Crossfield}, Ian J.~M.},
+ title = "{Exoplanet Phase Curves: Observations and Theory}",
+ year = 2018,
+ doi = {10.1007/978-3-319-55333-7\_116},
+ isbn = "978-3-319-55333-7",
+}"""
+
+ isbn_doi2 = """
+@INBOOK{2018haex.bookE.147C,
+ author = {{Cowan}, Nicolas B. and {Fujii}, Yuka},
+ title = "{Mapping Exoplanets}",
+ year = 2018,
+ doi = {10.1007/978-3-319-55333-7\_147},
+ isbn = "978-3-319-55333-7",
+}"""
+
+ isbn_no_doi1 = """
+@INBOOK{2018haex.bookE.116P,
+ author = {{Parmentier}, Vivien and {Crossfield}, Ian J.~M.},
+ title = "{Exoplanet Phase Curves: Observations and Theory}",
+ year = 2018,
+ isbn = "978-3-319-55333-7",
+}"""
+
+ isbn_no_doi2 = """
+@INBOOK{2018haex.bookE.147C,
+ author = {{Cowan}, Nicolas B. and {Fujii}, Yuka},
+ title = "{Mapping Exoplanets}",
+ year = 2018,
+ isbn = "978-3-319-55333-7",
+}"""
+
data = {
- 'jones_minimal': jones_minimal,
- 'jones_no_year': jones_no_year,
- 'jones_no_title': jones_no_title,
- 'jones_no_author': jones_no_author,
- 'jones_braces': jones_braces,
- 'beaulieu_apj': beaulieu_apj,
- 'beaulieu_arxiv': beaulieu_arxiv,
+ 'jones_minimal': jones_minimal,
+ 'jones_no_year': jones_no_year,
+ 'jones_no_title': jones_no_title,
+ 'jones_no_author': jones_no_author,
+ 'jones_braces': jones_braces,
+ 'beaulieu_apj': beaulieu_apj,
+ 'beaulieu_arxiv': beaulieu_arxiv,
'beaulieu_arxiv_dup': beaulieu_arxiv_dup,
- 'hunter': hunter,
- 'oliphant_dup': oliphant_dup,
- 'no_oliphant': no_oliphant,
- 'sing': sing,
- 'stodden': stodden,
+ 'hunter': hunter,
+ 'oliphant_dup': oliphant_dup,
+ 'no_oliphant': no_oliphant,
+ 'sing': sing,
+ 'stodden': stodden,
+ 'isbn_doi1': isbn_doi1,
+ 'isbn_doi2': isbn_doi2,
+ 'isbn_no_doi1': isbn_no_doi1,
+ 'isbn_no_doi2': isbn_no_doi2,
}
return data
diff --git a/tests/test_bib_manager.py b/tests/test_bib_manager.py
index 83edf8d..10922b9 100644
--- a/tests/test_bib_manager.py
+++ b/tests/test_bib_manager.py
@@ -801,6 +801,29 @@ def test_merge_duplicate_title_add(bibs, mock_init_sample, mock_input):
assert bibs['no_oliphant'] in loaded_bibs
+def test_duplicate_isbn_different_doi(capfd, entries):
+ text = entries['isbn_doi1'] + entries['isbn_doi2']
+ bibs = bm.read_file(text=text)
+ assert len(bibs) == 2
+ captured = capfd.readouterr()
+ assert captured.out == ''
+
+
+def test_duplicate_isbn_doi_vs_no_doi(capfd, entries):
+ text = entries['isbn_doi1'] + entries['isbn_no_doi2']
+ bibs = bm.read_file(text=text)
+ assert len(bibs) == 2
+ captured = capfd.readouterr()
+ assert captured.out == ''
+
+
[email protected]('mock_input', [['']], indirect=True)
+def test_duplicate_isbn_same_unknown_doi(mock_init, mock_input, entries):
+ text = entries['isbn_no_doi1'] + entries['isbn_no_doi2']
+ bibs = bm.read_file(text=text)
+ assert len(bibs) == 1
+
+
def test_init_from_scratch(mock_home):
shutil.rmtree(u.HOME, ignore_errors=True)
bm.init(bibfile=None)
|
ISBN duplicates problem
An edited book has many contributions. Each contribution has its own DOI number, but also shares its parent book's ISBN. Contributions and parent reference are now regarded as duplicates in bibmanager and merge always triggers asking. I have tried to disable exporting ISBN of contributions to bibfile. And this solved the annoyance of asking. But if only focusing on bibfile, this workaround seems lost ISBN information then. Is there a better way to settle this problem down?
|
0.0
|
0b465d2b83a889c09e1a668744f551ec01156882
|
[
"tests/test_bib_manager.py::test_duplicate_isbn_different_doi",
"tests/test_bib_manager.py::test_duplicate_isbn_doi_vs_no_doi"
] |
[
"tests/test_bib_manager.py::test_Bib_minimal",
"tests/test_bib_manager.py::test_Bib_ads_entry",
"tests/test_bib_manager.py::test_Bib_update_content",
"tests/test_bib_manager.py::test_Bib_mismatched_braces_raise",
"tests/test_bib_manager.py::test_Bib_update_key",
"tests/test_bib_manager.py::test_Bib_contains",
"tests/test_bib_manager.py::test_Bib_published_peer_reviewed",
"tests/test_bib_manager.py::test_Bib_published_arxiv",
"tests/test_bib_manager.py::test_Bib_published_non_ads",
"tests/test_bib_manager.py::test_Bib_month[-13]",
"tests/test_bib_manager.py::test_Bib_month[month",
"tests/test_bib_manager.py::test_Bib_lower_than_no_author",
"tests/test_bib_manager.py::test_Bib_lower_than_both_no_author",
"tests/test_bib_manager.py::test_Bib_lower_than_no_year",
"tests/test_bib_manager.py::test_Bib_equal_no_author",
"tests/test_bib_manager.py::test_Bib_equal_both_no_author",
"tests/test_bib_manager.py::test_Bib_not_equal_both_no_author",
"tests/test_bib_manager.py::test_Bib_not_equal_no_year",
"tests/test_bib_manager.py::test_Bib_equal_no_year",
"tests/test_bib_manager.py::test_Bib_meta",
"tests/test_bib_manager.py::test_Bib_warning_year",
"tests/test_bib_manager.py::test_Bib_warning_month[15]",
"tests/test_bib_manager.py::test_Bib_warning_month[tuesday]",
"tests/test_bib_manager.py::test_Bib_warning_authors_comma_typo",
"tests/test_bib_manager.py::test_Bib_warning_authors_missing_and",
"tests/test_bib_manager.py::test_remove_duplicates_no_duplicates",
"tests/test_bib_manager.py::test_remove_duplicates_identical",
"tests/test_bib_manager.py::test_remove_duplicates_diff_published",
"tests/test_bib_manager.py::test_remove_duplicates_query[mock_input0]",
"tests/test_bib_manager.py::test_filter_field_no_conflict",
"tests/test_bib_manager.py::test_filter_field_take_published",
"tests/test_bib_manager.py::test_filter_field_take_old",
"tests/test_bib_manager.py::test_filter_field_take_new",
"tests/test_bib_manager.py::test_filter_field_take_ask[mock_input0]",
"tests/test_bib_manager.py::test_filter_field_take_ask2[mock_input0]",
"tests/test_bib_manager.py::test_read_file_bibfile",
"tests/test_bib_manager.py::test_read_file_text",
"tests/test_bib_manager.py::test_read_file_single_line_entry",
"tests/test_bib_manager.py::test_read_file_ignore_comment",
"tests/test_bib_manager.py::test_read_file_ignore_comment_no_commas",
"tests/test_bib_manager.py::test_read_file_meta",
"tests/test_bib_manager.py::test_read_file_pdf_with_path",
"tests/test_bib_manager.py::test_read_file_pdf_with_bad_path",
"tests/test_bib_manager.py::test_read_file_error_bad_format",
"tests/test_bib_manager.py::test_read_file_error_open_end",
"tests/test_bib_manager.py::test_save",
"tests/test_bib_manager.py::test_load",
"tests/test_bib_manager.py::test_load_filed",
"tests/test_bib_manager.py::test_find_key",
"tests/test_bib_manager.py::test_find_bibcode",
"tests/test_bib_manager.py::test_find_key_bibcode",
"tests/test_bib_manager.py::test_find_key_not_found",
"tests/test_bib_manager.py::test_find_bibcode_not_found",
"tests/test_bib_manager.py::test_find_bibs",
"tests/test_bib_manager.py::test_find_no_arguments",
"tests/test_bib_manager.py::test_get_version_older",
"tests/test_bib_manager.py::test_get_version_no_pickle",
"tests/test_bib_manager.py::test_get_version_existing",
"tests/test_bib_manager.py::test_get_version_filed",
"tests/test_bib_manager.py::test_export_home",
"tests/test_bib_manager.py::test_export_no_overwrite",
"tests/test_bib_manager.py::test_export_meta",
"tests/test_bib_manager.py::test_export_no_meta",
"tests/test_bib_manager.py::test_merge_bibfile",
"tests/test_bib_manager.py::test_merge_bibs",
"tests/test_bib_manager.py::test_merge_no_new",
"tests/test_bib_manager.py::test_merge_base",
"tests/test_bib_manager.py::test_merge_bibs_no_titles",
"tests/test_bib_manager.py::test_merge_duplicate_key_ingnore[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_key_rename[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_title_ignore[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_title_add[mock_input0]",
"tests/test_bib_manager.py::test_duplicate_isbn_same_unknown_doi[mock_input0]",
"tests/test_bib_manager.py::test_init_from_scratch",
"tests/test_bib_manager.py::test_add_entries_dry[mock_prompt0]",
"tests/test_bib_manager.py::test_add_entries[mock_prompt0]",
"tests/test_bib_manager.py::test_search_author_lastname",
"tests/test_bib_manager.py::test_search_author_last_initials",
"tests/test_bib_manager.py::test_search_author_first",
"tests/test_bib_manager.py::test_search_author_multiple",
"tests/test_bib_manager.py::test_search_author_year_title",
"tests/test_bib_manager.py::test_search_title_multiple",
"tests/test_bib_manager.py::test_search_title_entry_without_title",
"tests/test_bib_manager.py::test_search_year_specific",
"tests/test_bib_manager.py::test_search_year_range",
"tests/test_bib_manager.py::test_search_bibcode",
"tests/test_bib_manager.py::test_search_bibcode_utf8",
"tests/test_bib_manager.py::test_search_bibcode_multiple",
"tests/test_bib_manager.py::test_search_key",
"tests/test_bib_manager.py::test_search_key_multiple",
"tests/test_bib_manager.py::test_prompt_search_kw1[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_kw2[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_extra[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_empty_prompt[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_empty_value[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_blank_value[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_double_def[mock_prompt_session0]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-12 17:21:31+00:00
|
mit
| 4,482 |
|
pddg__uroboros-29
|
diff --git a/uroboros/command.py b/uroboros/command.py
index 8167aba..963cba2 100644
--- a/uroboros/command.py
+++ b/uroboros/command.py
@@ -134,25 +134,26 @@ class Command(metaclass=abc.ABCMeta):
parser.set_defaults(func=self.run)
return parser
- def add_command(self, command: 'Command') -> 'Command':
+ def add_command(self, *commands: 'Command') -> 'Command':
"""
Add sub command to this command.
- :param command: An instance of `uroboros.command.Command`
+ :param commands: An instance of `uroboros.command.Command`
:return: None
"""
- assert isinstance(command, Command), \
- "Given command is not an instance of `uroboros.Command` or" \
- "an instance of its subclass."
- assert getattr(command, "name", None) is not None, \
- "{} does not have `name` attribute.".format(
- command.__class__.__name__)
- command_id = id(command)
- if command_id in self._parent_ids or \
- command_id in self._sub_command_ids:
- raise errors.CommandDuplicateError(command, self)
- command.register_parent(self._parent_ids)
- command.increment_nest(self._layer)
- self.sub_commands.append(command)
+ for command in commands:
+ assert isinstance(command, Command), \
+ "Given command is not an instance of `uroboros.Command` or" \
+ "an instance of its subclass."
+ assert getattr(command, "name", None) is not None, \
+ "{} does not have `name` attribute.".format(
+ command.__class__.__name__)
+ command_id = id(command)
+ if command_id in self._parent_ids or \
+ command_id in self._sub_command_ids:
+ raise errors.CommandDuplicateError(command, self)
+ command.register_parent(self._parent_ids)
+ command.increment_nest(self._layer)
+ self.sub_commands.append(command)
return self
@property
|
pddg/uroboros
|
f1eb29a42b6f75f573db2185eaba3762bd2ae71e
|
diff --git a/tests/test_command.py b/tests/test_command.py
index cfa85f1..b80b64f 100644
--- a/tests/test_command.py
+++ b/tests/test_command.py
@@ -134,6 +134,18 @@ class TestCommand(object):
actual = command._sub_command_ids
assert actual == expected
+ @pytest.mark.parametrize(
+ 'root_cmd,add_commands', [
+ (RootCommand(), [SecondCommand()]),
+ (RootCommand(), [SecondCommand(), ThirdCommand()]),
+ ]
+ )
+ def test_multiple_add_command(self, root_cmd, add_commands):
+ root_cmd.add_command(*add_commands)
+ actual = root_cmd._sub_command_ids
+ expected = set([id(c) for c in add_commands])
+ assert actual == expected
+
def test_add_others(self):
root = RootCommand()
with pytest.raises(AssertionError):
|
Support multiple argument for `add_command()`
## What is this?
`uroboros.Command.add_command()` supports only one `Command` now.
This feature will support multiple `Command`.
Before
```python
root = RootCommand()
root.add_command(ACommand())
root.add_command(BCommand())
root.add_command(CCommand())
```
After
```python
root = RootCommand()
root.add_command(
ACommand(),
BCommand(),
CCommand(),
)
```
## How to implement?
Just use `for` loop
|
0.0
|
f1eb29a42b6f75f573db2185eaba3762bd2ae71e
|
[
"tests/test_command.py::TestCommand::test_multiple_add_command[root_cmd1-add_commands1]"
] |
[
"tests/test_command.py::TestCommand::test_build_option[command0]",
"tests/test_command.py::TestCommand::test_build_option[command1]",
"tests/test_command.py::TestCommand::test_build_option[command2]",
"tests/test_command.py::TestCommand::test_validate[command0]",
"tests/test_command.py::TestCommand::test_validate[command1]",
"tests/test_command.py::TestCommand::test_validate[command2]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set0]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set1]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set2]",
"tests/test_command.py::TestCommand::test_register_parent[command_set0]",
"tests/test_command.py::TestCommand::test_register_parent[command_set1]",
"tests/test_command.py::TestCommand::test_register_parent[command_set2]",
"tests/test_command.py::TestCommand::test_add_command[command_set0]",
"tests/test_command.py::TestCommand::test_add_command[command_set1]",
"tests/test_command.py::TestCommand::test_add_command[command_set2]",
"tests/test_command.py::TestCommand::test_multiple_add_command[root_cmd0-add_commands0]",
"tests/test_command.py::TestCommand::test_add_others",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set0]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set1]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set2]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set3]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set0-argv0]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set1-argv1]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set2-argv2]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set3-argv3]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set4-argv4]",
"tests/test_command.py::TestCommand::test_add_duplicate_command[command_set0]",
"tests/test_command.py::TestCommand::test_add_duplicate_command[command_set1]",
"tests/test_command.py::TestCommand::test_execute[command_set0-root",
"tests/test_command.py::TestCommand::test_execute[command_set1-root",
"tests/test_command.py::TestCommand::test_execute[command_set2-root-False]",
"tests/test_command.py::TestCommand::test_execute[command_set3-root",
"tests/test_command.py::TestCommand::test_execute[command_set4-root",
"tests/test_command.py::TestCommand::test_execute[command_set5-root-False]",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set0-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set1-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set2-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set3-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set4-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set5-root",
"tests/test_command.py::TestCommand::test_before_validate",
"tests/test_command.py::TestCommand::test_pre_hook[commands0]",
"tests/test_command.py::TestCommand::test_pre_hook[commands1]",
"tests/test_command.py::TestCommand::test_pre_hook[commands2]",
"tests/test_command.py::TestCommand::test_after_validate",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands0]",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands1]",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands2]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-09-09 15:50:59+00:00
|
apache-2.0
| 4,483 |
|
pddg__uroboros-31
|
diff --git a/uroboros/command.py b/uroboros/command.py
index 963cba2..1f15f8b 100644
--- a/uroboros/command.py
+++ b/uroboros/command.py
@@ -221,6 +221,13 @@ class Command(metaclass=abc.ABCMeta):
self: commands_dict,
}
+ def get_options(self) -> 'List[Option]':
+ """
+ Get all `Option` instance of this `Command`.
+ :return: List of Option instance
+ """
+ return self.options
+
def print_help(self):
"""
Helper method for print the help message of this command.
@@ -233,10 +240,25 @@ class Command(metaclass=abc.ABCMeta):
sub_commands: 'List[Command]') -> 'argparse.Namespace':
return utils.call_one_by_one(
[self] + sub_commands,
- "before_validate",
- args
+ "_hook",
+ args,
+ hook_name="before_validate"
)
+ def _hook(self,
+ args: 'argparse.Namespace',
+ hook_name: str) -> 'argparse.Namespace':
+ for opt in self.get_options():
+ assert hasattr(opt, hook_name), \
+ "{} does not have '{}' method".format(
+ opt.__class__.__name__, hook_name)
+ args = getattr(opt, hook_name)(args)
+ assert hasattr(self, hook_name), \
+ "{} does not have '{}' method".format(
+ self.__class__.__name__, hook_name)
+ args = getattr(self, hook_name)(args)
+ return args
+
def before_validate(self,
unsafe_args: 'argparse.Namespace'
) -> 'argparse.Namespace':
@@ -276,8 +298,9 @@ class Command(metaclass=abc.ABCMeta):
) -> 'argparse.Namespace':
return utils.call_one_by_one(
[self] + sub_commands,
- "after_validate",
- args
+ "_hook",
+ args,
+ hook_name='after_validate'
)
def after_validate(self,
diff --git a/uroboros/option.py b/uroboros/option.py
index 1707b2a..bd30d45 100644
--- a/uroboros/option.py
+++ b/uroboros/option.py
@@ -19,6 +19,15 @@ class Option(metaclass=abc.ABCMeta):
-> 'argparse.ArgumentParser':
raise NotImplementedError
- @abc.abstractmethod
+ def before_validate(self,
+ unsafe_args: 'argparse.Namespace'
+ ) -> 'argparse.Namespace':
+ return unsafe_args
+
def validate(self, args: 'argparse.Namespace') -> 'List[Exception]':
- raise NotImplementedError
+ raise []
+
+ def after_validate(self,
+ safe_args: 'argparse.Namespace'
+ ) -> 'argparse.Namespace':
+ return safe_args
diff --git a/uroboros/utils.py b/uroboros/utils.py
index 04b4c6e..6a90229 100644
--- a/uroboros/utils.py
+++ b/uroboros/utils.py
@@ -10,12 +10,12 @@ def get_args_section_name(layer: int):
return "__layer{layer}_parser".format(layer=layer)
-def call_one_by_one(objs, method_name: str, args):
+def call_one_by_one(objs, method_name: str, args, **kwargs):
for obj in objs:
assert hasattr(obj, method_name), \
"'{cmd}' has no method '{method}".format(
cmd=obj.__name__,
method=method_name
)
- args = getattr(obj, method_name)(args)
+ args = getattr(obj, method_name)(args, **kwargs)
return args
|
pddg/uroboros
|
888c00e6f082510eb80de1ae4708cc2dd3d023a0
|
diff --git a/tests/test_option.py b/tests/test_option.py
new file mode 100644
index 0000000..8292b58
--- /dev/null
+++ b/tests/test_option.py
@@ -0,0 +1,90 @@
+import argparse
+
+import pytest
+
+import uroboros
+
+
+class SampleOption(uroboros.Option):
+
+ name = 'option'
+ value = 'option'
+
+ def build_option(self,
+ parser: 'argparse.ArgumentParser'
+ ) -> 'argparse.ArgumentParser':
+ parser.add_argument('--{}'.format(self.name),
+ default=self.value,
+ type=str)
+ return parser
+
+ def validate(self,
+ args: 'argparse.Namespace'
+ ) -> 'List[Exception]':
+ if getattr(args, self.name) != self.value:
+ return [Exception("{} is expected".format(self.value))]
+ return []
+
+ def before_validate(self,
+ unsafe_args: 'argparse.Namespace'
+ ) -> 'argparse.Namespace':
+ setattr(
+ unsafe_args,
+ 'before_validate_{}'.format(self.name),
+ self.value
+ )
+ return unsafe_args
+
+ def after_validate(self, safe_args):
+ setattr(
+ safe_args,
+ 'after_validate_{}'.format(self.name),
+ self.value
+ )
+ return safe_args
+
+
+class NoHookOption(uroboros.Option):
+ name = 'nohook'
+ value = 'nohook'
+
+ def build_option(self,
+ parser: 'argparse.ArgumentParser'
+ ) -> 'argparse.ArgumentParser':
+ parser.add_argument("--{}".format(self.name), default=self.value)
+ return parser
+
+
+class TestOption(object):
+
+ def test_no_before_validate(self):
+ args = argparse.Namespace()
+ nohook = NoHookOption()
+ assert nohook.before_validate(args) == args
+
+ def test_before_hook(self):
+ args = argparse.Namespace()
+ opt = SampleOption()
+ hooked_args = opt.after_validate(args)
+ actual = getattr(
+ hooked_args, "after_validate_{}".format(opt.name))
+ assert actual == opt.value
+
+ def test_no_after_validate(self):
+ args = argparse.Namespace()
+ nohook = NoHookOption()
+ assert nohook.before_validate(args) == args
+
+ def test_after_hook(self):
+ args = argparse.Namespace()
+ opt = SampleOption()
+ hooked_args = opt.after_validate(args)
+ actual = getattr(
+ hooked_args, "after_validate_{}".format(opt.name))
+ assert actual == opt.value
+
+ def test_cannot_instantiate(self):
+ class Opt(uroboros.Option):
+ pass
+ with pytest.raises(TypeError):
+ Opt()
|
Implement hook function to `uroboros.Option`
I forgot to implement it...
|
0.0
|
888c00e6f082510eb80de1ae4708cc2dd3d023a0
|
[
"tests/test_option.py::TestOption::test_no_before_validate",
"tests/test_option.py::TestOption::test_no_after_validate"
] |
[
"tests/test_option.py::TestOption::test_before_hook",
"tests/test_option.py::TestOption::test_after_hook",
"tests/test_option.py::TestOption::test_cannot_instantiate"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-10 04:49:13+00:00
|
apache-2.0
| 4,484 |
|
pddg__uroboros-34
|
diff --git a/uroboros/command.py b/uroboros/command.py
index 1f15f8b..b32f61c 100644
--- a/uroboros/command.py
+++ b/uroboros/command.py
@@ -121,7 +121,7 @@ class Command(metaclass=abc.ABCMeta):
name=cmd.name,
description=cmd.long_description,
help=cmd.short_description,
- parents=[o.get_parser() for o in cmd.options],
+ parents=[o.get_parser() for o in cmd.get_options()],
)
cmd.initialize(sub_parser)
@@ -129,7 +129,7 @@ class Command(metaclass=abc.ABCMeta):
parser = argparse.ArgumentParser(
prog=self.name,
description=self.long_description,
- parents=[o.get_parser() for o in self.options]
+ parents=[o.get_parser() for o in self.get_options()]
)
parser.set_defaults(func=self.run)
return parser
|
pddg/uroboros
|
45d0b03c86a75d96237cd885d37b8715bf5cfdfc
|
diff --git a/tests/test_command.py b/tests/test_command.py
index b80b64f..a7eb5f0 100644
--- a/tests/test_command.py
+++ b/tests/test_command.py
@@ -4,6 +4,7 @@ from unittest import mock
import pytest
+from uroboros import Option
from uroboros.errors import CommandDuplicateError
from .base import RootCommand, SecondCommand, ThirdCommand
@@ -318,3 +319,22 @@ class TestCommand(object):
for cmd in commands:
key = "after_validate_{}".format(cmd.name)
assert getattr(args, key, None) == cmd.value
+
+ def test_create_default_parser(self):
+ class Opt(Option):
+ def build_option(self, parser):
+ parser.add_argument("--test", type=str)
+ return parser
+
+ class Cmd(RootCommand):
+ options = [Opt()]
+
+ class Cmd2(RootCommand):
+ def get_options(self):
+ return [Opt()]
+
+ argv = ["--test", "test"]
+ for cmd in [Cmd(), Cmd2()]:
+ cmd_parser = cmd.create_default_parser()
+ args = cmd_parser.parse_args(argv)
+ assert args.test == 'test'
|
`Command.get_options()` does not work well
### Expected
```python
class SomeOption(uroboros.Option):
def build_option(self, parser):
parser.add_argument('--opt', type=str)
return parser
class HelloCommand(uroboros.Command):
def get_options():
return [SomeOption()]
HelloCommand().execute()
```
Now, `--opt` can be used.
### Actual
`--opt` does not be appeared in help message, and cannot parse it.
|
0.0
|
45d0b03c86a75d96237cd885d37b8715bf5cfdfc
|
[
"tests/test_command.py::TestCommand::test_create_default_parser"
] |
[
"tests/test_command.py::TestCommand::test_build_option[command0]",
"tests/test_command.py::TestCommand::test_build_option[command1]",
"tests/test_command.py::TestCommand::test_build_option[command2]",
"tests/test_command.py::TestCommand::test_validate[command0]",
"tests/test_command.py::TestCommand::test_validate[command1]",
"tests/test_command.py::TestCommand::test_validate[command2]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set0]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set1]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set2]",
"tests/test_command.py::TestCommand::test_register_parent[command_set0]",
"tests/test_command.py::TestCommand::test_register_parent[command_set1]",
"tests/test_command.py::TestCommand::test_register_parent[command_set2]",
"tests/test_command.py::TestCommand::test_add_command[command_set0]",
"tests/test_command.py::TestCommand::test_add_command[command_set1]",
"tests/test_command.py::TestCommand::test_add_command[command_set2]",
"tests/test_command.py::TestCommand::test_multiple_add_command[root_cmd0-add_commands0]",
"tests/test_command.py::TestCommand::test_multiple_add_command[root_cmd1-add_commands1]",
"tests/test_command.py::TestCommand::test_add_others",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set0]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set1]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set2]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set3]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set0-argv0]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set1-argv1]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set2-argv2]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set3-argv3]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set4-argv4]",
"tests/test_command.py::TestCommand::test_add_duplicate_command[command_set0]",
"tests/test_command.py::TestCommand::test_add_duplicate_command[command_set1]",
"tests/test_command.py::TestCommand::test_execute[command_set0-root",
"tests/test_command.py::TestCommand::test_execute[command_set1-root",
"tests/test_command.py::TestCommand::test_execute[command_set2-root-False]",
"tests/test_command.py::TestCommand::test_execute[command_set3-root",
"tests/test_command.py::TestCommand::test_execute[command_set4-root",
"tests/test_command.py::TestCommand::test_execute[command_set5-root-False]",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set0-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set1-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set2-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set3-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set4-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set5-root",
"tests/test_command.py::TestCommand::test_before_validate",
"tests/test_command.py::TestCommand::test_pre_hook[commands0]",
"tests/test_command.py::TestCommand::test_pre_hook[commands1]",
"tests/test_command.py::TestCommand::test_pre_hook[commands2]",
"tests/test_command.py::TestCommand::test_after_validate",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands0]",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands1]",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands2]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-09-21 03:15:42+00:00
|
apache-2.0
| 4,485 |
|
pddg__uroboros-37
|
diff --git a/uroboros/command.py b/uroboros/command.py
index b32f61c..2188031 100644
--- a/uroboros/command.py
+++ b/uroboros/command.py
@@ -226,7 +226,7 @@ class Command(metaclass=abc.ABCMeta):
Get all `Option` instance of this `Command`.
:return: List of Option instance
"""
- return self.options
+ return [opt() if type(opt) == type else opt for opt in self.options]
def print_help(self):
"""
|
pddg/uroboros
|
7532a7f3e3cdfe11764c04dc03361a4ee51e1b56
|
diff --git a/tests/test_command.py b/tests/test_command.py
index a7eb5f0..05a703b 100644
--- a/tests/test_command.py
+++ b/tests/test_command.py
@@ -13,6 +13,18 @@ from .base import RootCommand, SecondCommand, ThirdCommand
def commands():
return RootCommand(), SecondCommand(), ThirdCommand()
+class Opt1(Option):
+ def build_option(self, parser):
+ return parser
+
+class Opt2(Option):
+ def build_option(self, parser):
+ return parser
+
+class Opt3(Option):
+ def build_option(self, parser):
+ return parser
+
def get_sub_commands(cmd_set):
if len(cmd_set) == 0:
@@ -338,3 +350,34 @@ class TestCommand(object):
cmd_parser = cmd.create_default_parser()
args = cmd_parser.parse_args(argv)
assert args.test == 'test'
+
+ @pytest.mark.parametrize(
+ 'option_objs', [
+ [Opt1(), Opt2(), Opt3()],
+ ]
+ )
+ def test_get_options(self, option_objs):
+ additional_opt = Opt1()
+ class SourceCommand(RootCommand):
+ options = option_objs
+ class Cmd(SourceCommand):
+ def get_options(self):
+ opts =super(Cmd, self).get_options()
+ opts.append(additional_opt)
+ return opts
+ root = SourceCommand()
+ source_opts = root.get_options()
+ cmd = Cmd()
+ actual_options = cmd.get_options()
+ expected_options = option_objs + [additional_opt]
+ assert len(actual_options) == len(expected_options)
+ # All options are instantiated
+ types = map(type, actual_options)
+ bools = map(lambda x: x != type, types)
+ assert all(bools)
+ # All class is correct
+ actual_classes = map(lambda x: type(x), actual_options)
+ expected_classes = map(lambda x: x if type(x) == type else type(x), expected_options)
+ assert list(actual_classes) == list(expected_classes)
+ # Inheritance source class is not modified
+ assert RootCommand().get_options() == []
|
Operations on `options` in the inheritance destination class also affect the inheritance source.
Sample code is as follows.
```python
class Opt(uroboros.Option):
def build_option(self, parser):
parser.add_argument("--test", type=str)
return parser
class RootCommand(uroboros.Command):
name = 'root'
def run(self, args):
self.print_help()
return 0
class BaseCommand(uroboros.Command):
options = []
def run(self, args):
print(args.test)
return 0
class FirstCommand(BaseCommand):
name = 'first'
def get_options(self):
options = super(FirstCommand, self).get_options()
options.append(Opt())
return options
class SecondCommand(BaseCommand):
name = 'second'
root = RootCommand()
root.add_command(
FirstCommand(),
SecondCommand(),
)
root.execute()
```
## Expected
```sh
$ pipenv run python sample.py -h
usage: root [-h] {first,second} ...
optional arguments:
-h, --help show this help message and exit
Sub commands:
{first,second}
first
second
$pipenv run python sample.py first -h
usage: root first [-h] [--test TEST]
optional arguments:
-h, --help show this help message and exit
--test TEST
$pipenv run python sample.py second -h
usage: root second [-h]
optional arguments:
-h, --help show this help message and exit
```
## Actual
```
$ pipenv run python sample.py -h
Traceback (most recent call last):
File "/home/pudding/.local/share/virtualenvs/swat-analyzer-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 57, in execute
self._check_initialized()
File "/home/pudding/.local/share/virtualenvs/swat-analyzer-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 322, in _check_initialized
raise errors.CommandNotRegisteredError(self.name)
uroboros.errors.CommandNotRegisteredError: Command 'root' has not been registered yet.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test.py", line 36, in <module>
root.execute()
File "/home/pudding/.local/share/virtualenvs/swat-analyzer-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 59, in execute
self.initialize()
File "/home/pudding/.local/share/virtualenvs/swat-analyzer-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 110, in initialize
self.initialize_sub_parsers(self._parser)
File "/home/pudding/.local/share/virtualenvs/swat-analyzer-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 124, in initialize_sub_parsers
parents=[o.get_parser() for o in cmd.get_options()],
File "/home/pudding/.local/share/virtualenvs/swat-analyzer-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 124, in <listcomp>
parents=[o.get_parser() for o in cmd.get_options()],
File "/home/s-kokuryo/.local/share/virtualenvs/swat-analyzer-EWaBsfe_/lib/python3.7/site-packages/uroboros/option.py", line 15, in get_parser
return self.build_option(self.parser)
File "test.py", line 5, in build_option
parser.add_argument("--test", type=str)
File "/home/pudding/.anyenv/envs/pyenv/versions/3.7.4/lib/python3.7/argparse.py", line 1367, in add_argument
return self._add_action(action)
File "/home/pudding/.anyenv/envs/pyenv/versions/3.7.4/lib/python3.7/argparse.py", line 1730, in _add_action
self._optionals._add_action(action)
File "/home/pudding/.anyenv/envs/pyenv/versions/3.7.4/lib/python3.7/argparse.py", line 1571, in _add_action
action = super(_ArgumentGroup, self)._add_action(action)
File "/home/pudding/.anyenv/envs/pyenv/versions/3.7.4/lib/python3.7/argparse.py", line 1381, in _add_action
self._check_conflict(action)
File "/home/pudding/.anyenv/envs/pyenv/versions/3.7.4/lib/python3.7/argparse.py", line 1520, in _check_conflict
conflict_handler(action, confl_optionals)
File "/home/pudding/.anyenv/envs/pyenv/versions/3.7.4/lib/python3.7/argparse.py", line 1529, in _handle_conflict_error
raise ArgumentError(action, message % conflict_string)
argparse.ArgumentError: argument --test: conflicting option string: --test
```
## Why this happened?
Python's list object is mutable. `uroboros.Option`'s `get_options()` function return the pointer of `options` attribute itself. So the operation on it in the inheritance class also affect its source class.
## How to fix
Change the implementation of `uroboros.Command.get_options` like [permission mechanism of django-restframework's view class](https://github.com/encode/django-rest-framework/blob/3.10.3/rest_framework/views.py#L266).
`uroboros.Command.options` accepts both of `uroboros.Option` class and its instance for backward compatibility. Check the type of the attribute of `uroboros.Command.options` and instantiate the class if need, and generate new list and return.
|
0.0
|
7532a7f3e3cdfe11764c04dc03361a4ee51e1b56
|
[
"tests/test_command.py::TestCommand::test_get_options[option_objs0]"
] |
[
"tests/test_command.py::TestCommand::test_build_option[command0]",
"tests/test_command.py::TestCommand::test_build_option[command1]",
"tests/test_command.py::TestCommand::test_build_option[command2]",
"tests/test_command.py::TestCommand::test_validate[command0]",
"tests/test_command.py::TestCommand::test_validate[command1]",
"tests/test_command.py::TestCommand::test_validate[command2]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set0]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set1]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set2]",
"tests/test_command.py::TestCommand::test_register_parent[command_set0]",
"tests/test_command.py::TestCommand::test_register_parent[command_set1]",
"tests/test_command.py::TestCommand::test_register_parent[command_set2]",
"tests/test_command.py::TestCommand::test_add_command[command_set0]",
"tests/test_command.py::TestCommand::test_add_command[command_set1]",
"tests/test_command.py::TestCommand::test_add_command[command_set2]",
"tests/test_command.py::TestCommand::test_multiple_add_command[root_cmd0-add_commands0]",
"tests/test_command.py::TestCommand::test_multiple_add_command[root_cmd1-add_commands1]",
"tests/test_command.py::TestCommand::test_add_others",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set0]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set1]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set2]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set3]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set0-argv0]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set1-argv1]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set2-argv2]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set3-argv3]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set4-argv4]",
"tests/test_command.py::TestCommand::test_add_duplicate_command[command_set0]",
"tests/test_command.py::TestCommand::test_add_duplicate_command[command_set1]",
"tests/test_command.py::TestCommand::test_execute[command_set0-root",
"tests/test_command.py::TestCommand::test_execute[command_set1-root",
"tests/test_command.py::TestCommand::test_execute[command_set2-root-False]",
"tests/test_command.py::TestCommand::test_execute[command_set3-root",
"tests/test_command.py::TestCommand::test_execute[command_set4-root",
"tests/test_command.py::TestCommand::test_execute[command_set5-root-False]",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set0-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set1-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set2-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set3-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set4-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set5-root",
"tests/test_command.py::TestCommand::test_before_validate",
"tests/test_command.py::TestCommand::test_pre_hook[commands0]",
"tests/test_command.py::TestCommand::test_pre_hook[commands1]",
"tests/test_command.py::TestCommand::test_pre_hook[commands2]",
"tests/test_command.py::TestCommand::test_after_validate",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands0]",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands1]",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands2]",
"tests/test_command.py::TestCommand::test_create_default_parser"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-25 04:12:21+00:00
|
apache-2.0
| 4,486 |
|
pddg__uroboros-41
|
diff --git a/uroboros/option.py b/uroboros/option.py
index 03975ff..65bf35f 100644
--- a/uroboros/option.py
+++ b/uroboros/option.py
@@ -27,7 +27,7 @@ class Option(metaclass=abc.ABCMeta):
return unsafe_args
def validate(self, args: 'argparse.Namespace') -> 'List[Exception]':
- raise []
+ return []
def after_validate(self,
safe_args: 'argparse.Namespace'
|
pddg/uroboros
|
e21435c22adc8da53e3b1036f71f1a111525d849
|
diff --git a/tests/test_option.py b/tests/test_option.py
index 67b0751..00e2a24 100644
--- a/tests/test_option.py
+++ b/tests/test_option.py
@@ -57,6 +57,13 @@ class NoHookOption(uroboros.Option):
class TestOption(object):
+ def test_validate(self):
+ args = argparse.Namespace()
+ nohook = NoHookOption()
+ actual = nohook.validate(args)
+ assert type(actual) == list
+ assert len(actual) == 0
+
def test_no_before_validate(self):
args = argparse.Namespace()
nohook = NoHookOption()
|
Use `return` not `raise` in Option.validate
If the validate function is not overwritten, it rases following exception.
```sh
Traceback (most recent call last):
File "examples/main.py", line 310, in <module>
exit_code = root_cmd.execute()
File "/home/pudding/.local/share/virtualenvs/hoger-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 67, in execute
exceptions = self._validate_all(args, commands)
File "/home/pudding/.local/share/virtualenvs/hoge-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 281, in _validate_all
exceptions.extend(cmd.validate(args))
File "/home/pudding/.local/share/virtualenvs/hoge-EWaBsfe_/lib/python3.7/site-packages/uroboros/command.py", line 292, in validate
exceptions.extend(opt.validate(args))
File "/home/pudding/.local/share/virtualenvs/hoge-EWaBsfe_/lib/python3.7/site-packages/uroboros/option.py", line 30, in validate
raise []
TypeError: exceptions must derive from BaseException
```
|
0.0
|
e21435c22adc8da53e3b1036f71f1a111525d849
|
[
"tests/test_option.py::TestOption::test_validate"
] |
[
"tests/test_option.py::TestOption::test_no_before_validate",
"tests/test_option.py::TestOption::test_before_hook",
"tests/test_option.py::TestOption::test_no_after_validate",
"tests/test_option.py::TestOption::test_after_hook",
"tests/test_option.py::TestOption::test_cannot_instantiate",
"tests/test_option.py::TestOption::test_call_twice[option0]",
"tests/test_option.py::TestOption::test_call_twice[option1]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-09-30 04:05:59+00:00
|
apache-2.0
| 4,487 |
|
pddg__uroboros-49
|
diff --git a/Pipfile b/Pipfile
index 6095ede..b96cc9f 100644
--- a/Pipfile
+++ b/Pipfile
@@ -10,6 +10,7 @@ tox = "*"
uroboros = {editable = true,path = "."}
[packages]
+zipp = "*"
[requires]
python_version = "3.7"
diff --git a/Pipfile.lock b/Pipfile.lock
index 5c9f134..b049267 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "cd721bd9293a6a388c3b0c6f647934a81959b0b382f0faafcc08a74b533529a3"
+ "sha256": "a234c0b9b95d35722035205fef2e372c310c40ad57917e0f9f598266ccf153c3"
},
"pipfile-spec": 6,
"requires": {
@@ -15,7 +15,16 @@
}
]
},
- "default": {},
+ "default": {
+ "zipp": {
+ "hashes": [
+ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b",
+ "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"
+ ],
+ "index": "pypi",
+ "version": "==3.1.0"
+ }
+ },
"develop": {
"appdirs": {
"hashes": [
@@ -34,9 +43,10 @@
},
"distlib": {
"hashes": [
- "sha256:2e166e231a26b36d6dfe35a48c4464346620f8645ed0ace01ee31822b288de21"
+ "sha256:8c09de2c67b3e7deef7184574fc060ab8a793e7adbb183d942c389c8b13c52fb",
+ "sha256:edf6116872c863e1aa9d5bb7cb5e05a022c519a4594dc703843343a9ddd9bff1"
],
- "version": "==0.3.0"
+ "version": "==0.3.1"
},
"filelock": {
"hashes": [
@@ -149,11 +159,11 @@
},
"tox": {
"hashes": [
- "sha256:a0d36849e59ac4a28664e80951a634b0e920d88047ce3fa8fa7b45216e573f30",
- "sha256:db12b48359ba2cbc8c8f7ab712706ee67d59f8f9e8e0c795dcc45349b8784e48"
+ "sha256:60c3793f8ab194097ec75b5a9866138444f63742b0f664ec80be1222a40687c5",
+ "sha256:9a746cda9cadb9e1e05c7ab99f98cfcea355140d2ecac5f97520be94657c3bc7"
],
"index": "pypi",
- "version": "==3.16.0"
+ "version": "==3.16.1"
},
"uroboros": {
"editable": true,
@@ -175,8 +185,12 @@
"version": "==0.2.5"
},
"zipp": {
- "editable": true,
- "path": "."
+ "hashes": [
+ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b",
+ "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"
+ ],
+ "index": "pypi",
+ "version": "==3.1.0"
}
}
}
diff --git a/uroboros/command.py b/uroboros/command.py
index 2188031..e6d14a6 100644
--- a/uroboros/command.py
+++ b/uroboros/command.py
@@ -11,6 +11,7 @@ from uroboros.constants import ExitStatus
if TYPE_CHECKING:
from typing import List, Dict, Optional, Union, Set
from uroboros.option import Option
+ CommandDict = Dict['Command', 'Optional[Command]']
class Command(metaclass=abc.ABCMeta):
@@ -43,12 +44,14 @@ class Command(metaclass=abc.ABCMeta):
self._parser = None # type: Optional[argparse.ArgumentParser]
def execute(self, argv: 'List[str]' = None) -> int:
- """
- Execute `uroboros.command.Command.run` internally.
- And return exit code (integer).
- :param argv: Arguments to parse. If None is given, try to parse
- `sys.argv` by the parser of this command.
- :return: Exit status (integer only)
+ """Execute the command and return exit code (integer)
+
+ Args:
+ argv (:obj: List[str], optional): Arguments to parse. If None is
+ given (e.g. do not pass any args), try to parse `sys.argv` .
+
+ Returns:
+ int: Exit status code
"""
assert getattr(self, "name", None) is not None, \
"{} does not have `name` attribute.".format(
@@ -85,20 +88,43 @@ class Command(metaclass=abc.ABCMeta):
@abc.abstractmethod
def run(self, args: 'argparse.Namespace') -> 'Union[ExitStatus, int]':
+ """This method must implement user defined action.
+
+ This method is an abstract method of this class. This should be
+ overwitten by the user. Return exit status code after execution.
+
+ Args:
+ args (argparse.Namespace): Parsed arguments.
+
+ Returns:
+ Union[ExitStatus, int]: Exit status code
+ """
raise NotImplementedError
def build_option(self, parser: 'argparse.ArgumentParser') \
-> 'argparse.ArgumentParser':
+ """Configure ArgumentParser to add user defined options of this command.
+
+ If you want to add your own option to this command, you can override
+ this function. Then, you can configure the parser given by argument
+ of this function.
+
+ Args:
+ parser (argparse.ArgumentParser): Parsed arguments.
+
+ Returns:
+ argparse.ArgumentParser: Configured argument parser.
+ """
return parser
def initialize(self, parser: 'Optional[argparse.ArgumentParser]' = None):
- """
- Initialize this command and its sub commands recursively.
- :param parser: `argparse.ArgumentParser` of parent command
- :return: None
+ """Initialize this command and its sub commands recursively.
+
+ Args:
+ parser (argparse.ArgumentParser): ArgumentParser of parent command
"""
if parser is None:
- self._parser = self.create_default_parser()
+ self._parser = self._create_default_parser()
else:
self._parser = parser
# Add validator
@@ -107,9 +133,9 @@ class Command(metaclass=abc.ABCMeta):
# Add function to execute
self._parser.set_defaults(func=self.run)
self.build_option(self._parser)
- self.initialize_sub_parsers(self._parser)
+ self._initialize_sub_parsers(self._parser)
- def initialize_sub_parsers(self, parser: 'argparse.ArgumentParser'):
+ def _initialize_sub_parsers(self, parser: 'argparse.ArgumentParser'):
if len(self.sub_commands) == 0:
return
parser = parser.add_subparsers(
@@ -125,7 +151,7 @@ class Command(metaclass=abc.ABCMeta):
)
cmd.initialize(sub_parser)
- def create_default_parser(self) -> 'argparse.ArgumentParser':
+ def _create_default_parser(self) -> 'argparse.ArgumentParser':
parser = argparse.ArgumentParser(
prog=self.name,
description=self.long_description,
@@ -135,10 +161,13 @@ class Command(metaclass=abc.ABCMeta):
return parser
def add_command(self, *commands: 'Command') -> 'Command':
- """
- Add sub command to this command.
- :param commands: An instance of `uroboros.command.Command`
- :return: None
+ """Add sub command to this command.
+
+ Add one or more commands to this command.
+ The added commands are callable as its sub command.
+
+ Args:
+ commands: An instance of sub commands
"""
for command in commands:
assert isinstance(command, Command), \
@@ -161,15 +190,27 @@ class Command(metaclass=abc.ABCMeta):
return {id(cmd) for cmd in self.sub_commands}
def register_parent(self, parent_ids: 'Set[int]'):
+ """Register parent command
+
+ This function is used internaly.
+ Register all parent command ids to check that the command has
+ already been registered.
+
+ Args:
+ parent_ids (Set[int]): Set of parent command instance ids
+ """
self._parent_ids |= parent_ids
for cmd in self.sub_commands:
cmd.register_parent(self._parent_ids)
def increment_nest(self, parent_layer_count: int):
- """
- Increment the depth of this command.
- :param parent_layer_count: Number of nest of parent command.
- :return: None
+ """Increment the depth of this command and its children.
+
+ This function is used internaly.
+ Increment the nest of this command and its children recursively.
+
+ Args:
+ parent_layer_count (int): Number of nest of parent command.
"""
self._layer = parent_layer_count + 1
# Propagate the increment to sub commands
@@ -177,12 +218,14 @@ class Command(metaclass=abc.ABCMeta):
cmd.increment_nest(self._layer)
def get_sub_commands(self, args: 'argparse.Namespace') -> 'List[Command]':
- """
- Get the list of `Command` specified by CLI except myself.
- if myself is root_cmd and "root_cmd first_cmd second_cmd"
+ """Get the list of `Command` specified by CLI except myself.
+
+ If myself is root_cmd and "root_cmd first_cmd second_cmd"
is specified in CLI, this may return the instances of first_cmd
and second_cmd.
- :return: List of `uroboros.Command`
+
+ Returns:
+ List[Command]: Child commands of this command.
"""
commands = []
# Do not include myself
@@ -193,11 +236,10 @@ class Command(metaclass=abc.ABCMeta):
layer += 1
return commands
- def get_all_sub_commands(self) -> 'Dict[Command, dict]':
- """
- Get the nested dictionary of `Command`.
+ def get_all_sub_commands(self) -> 'Dict[Command, CommandDict]':
+ """Get all child commands of this command including itself.
+
Traverse all sub commands of this command recursively.
- :return: Dictionary of command
{
self: {
first_command: {
@@ -213,6 +255,9 @@ class Command(metaclass=abc.ABCMeta):
...
},
}
+
+ Returns:
+ Dict[Command, CommandDict]: All child commands of this command.
"""
commands_dict = {}
for sub_cmd in self.sub_commands:
@@ -222,15 +267,22 @@ class Command(metaclass=abc.ABCMeta):
}
def get_options(self) -> 'List[Option]':
- """
- Get all `Option` instance of this `Command`.
- :return: List of Option instance
+ """Get all uroboros.`Option instance of this `Command.
+
+ Returns:
+ List[Option]: List of uroboros.Option instance
"""
return [opt() if type(opt) == type else opt for opt in self.options]
def print_help(self):
- """
- Helper method for print the help message of this command.
+ """Helper method for print the help message of this command.
+
+ Raises:
+ errors.CommandNotRegisteredError: If this command has
+ not been initialized.
+
+ Note:
+ This function can be called after initialization.
"""
self._check_initialized()
return self._parser.print_help()
@@ -262,14 +314,18 @@ class Command(metaclass=abc.ABCMeta):
def before_validate(self,
unsafe_args: 'argparse.Namespace'
) -> 'argparse.Namespace':
- """
- Hook function before validation. This method will be called
- in order from root command to its children.
+ """Hook function before validation
+
+ This method will be called in order from root command to its children.
Use `unsafe_args` carefully since it has not been validated yet.
You can set any value into `unsafe_args` and you must return it
finally.
- :param unsafe_args: An instance of argparse.Namespace
- :return: An instance of argparse.Namespace
+
+ Args:
+ unsafe_args (argparse.Namespace): Parsed arguments which
+ are not validated.
+ Returns:
+ argparse.Namespace: An instance of argparse.Namespace
"""
return unsafe_args
@@ -282,10 +338,13 @@ class Command(metaclass=abc.ABCMeta):
return exceptions
def validate(self, args: 'argparse.Namespace') -> 'List[Exception]':
- """
- Validate parameters of given options.
- :param args: Parsed arguments
- :return: The list of exceptions
+ """Validate parameters of given options.
+
+ Args:
+ args (argparse.Namespace): Parsed arguments which
+ are not validated.
+ Returns:
+ List[Exception]: The list of exceptions
"""
exceptions = []
for opt in self.options:
@@ -306,17 +365,27 @@ class Command(metaclass=abc.ABCMeta):
def after_validate(self,
safe_args: 'argparse.Namespace'
) -> 'argparse.Namespace':
- """
- Hook function after validation. This method will be called
- in order from root command to its children.
- Given argument `safe_args` is validated by validation method
- of your commands. You can set any value into `safe_args` and
- you must return it finally.
- :param safe_args: An instance of argparse.Namespace
- :return: An instance of argparse.Namespace
+ """Hook function after validation.
+
+ This method will be called in order from root command to
+ its children. Given argument `safe_args` is validated by
+ validation method of your commands. You can set any value
+ into `safe_args` and you must return it finally.
+
+ Args:
+ safe_args (argparse.Namespace): Validated arguments
+
+ Returns:
+ argparse.Namespace: An instance of argparse.Namespace
"""
return safe_args
def _check_initialized(self):
+ """Check that this command has been initialized.
+
+ Raises:
+ errors.CommandNotRegisteredError: If this command has
+ not been initialized.
+ """
if self._parser is None:
raise errors.CommandNotRegisteredError(self.name)
diff --git a/uroboros/errors.py b/uroboros/errors.py
index 19b12cc..f106ea3 100644
--- a/uroboros/errors.py
+++ b/uroboros/errors.py
@@ -5,6 +5,7 @@ if TYPE_CHECKING:
class CommandNotRegisteredError(Exception):
+ """The command is not initialized"""
def __init__(self, name: str):
self.name = name
@@ -25,6 +26,7 @@ class NoCommandError(Exception):
class CommandDuplicateError(Exception):
+ """Same command instance is added"""
def __init__(self, command: 'Command', parent: 'Command'):
self.command = command
diff --git a/uroboros/option.py b/uroboros/option.py
index 65bf35f..c98d7a4 100644
--- a/uroboros/option.py
+++ b/uroboros/option.py
@@ -8,6 +8,7 @@ if TYPE_CHECKING:
class Option(metaclass=abc.ABCMeta):
+ """Common option class"""
def __init__(self):
self.parser = argparse.ArgumentParser(add_help=False)
@@ -19,17 +20,64 @@ class Option(metaclass=abc.ABCMeta):
@abc.abstractmethod
def build_option(self, parser: 'argparse.ArgumentParser') \
-> 'argparse.ArgumentParser':
+ """Configure ArgumentParser to add user defined options of this command.
+
+ You must override this function. You should configure the parser given
+ by argument of this function, then return the parser.
+ This method and `uroboros.Command.build_option` method are functionally
+ equivalent.
+
+ Args:
+ parser (argparse.ArgumentParser): Initialized argument parser
+
+ Returns:
+ argparse.ArgumentParser: Configured argument parser.
+ """
raise NotImplementedError
def before_validate(self,
unsafe_args: 'argparse.Namespace'
) -> 'argparse.Namespace':
+ """Hook function before validation
+
+ This method will be called in order from root command to its children.
+ Use `unsafe_args` carefully since it has not been validated yet.
+ You can set any value into `unsafe_args` and you must return it
+ finally.
+
+ Args:
+ unsafe_args (argparse.Namespace): Parsed arguments which
+ are not validated.
+ Returns:
+ argparse.Namespace: An instance of argparse.Namespace
+ """
return unsafe_args
def validate(self, args: 'argparse.Namespace') -> 'List[Exception]':
+ """Validate parameters of given options.
+
+ Args:
+ args (argparse.Namespace): Parsed arguments which
+ are not validated.
+ Returns:
+ List[Exception]: The list of exceptions
+ """
return []
def after_validate(self,
safe_args: 'argparse.Namespace'
) -> 'argparse.Namespace':
+ """Hook function after validation.
+
+ This method will be called in order from root command to
+ its children. Given argument `safe_args` is validated by
+ validation method of your commands. You can set any value
+ into `safe_args` and you must return it finally.
+
+ Args:
+ safe_args (argparse.Namespace): Validated arguments
+
+ Returns:
+ argparse.Namespace: An instance of argparse.Namespace
+ """
return safe_args
diff --git a/uroboros/utils.py b/uroboros/utils.py
index 6a90229..ce072ac 100644
--- a/uroboros/utils.py
+++ b/uroboros/utils.py
@@ -1,16 +1,22 @@
def get_args_command_name(layer: int):
+ """Return the specified layer's command name"""
return "__layer{layer}_command".format(layer=layer)
def get_args_validator_name(layer: int):
+ """Return the specified layer's validator name"""
return "__layer{layer}_validator".format(layer=layer)
def get_args_section_name(layer: int):
+ """Return the specified layer's parser name"""
return "__layer{layer}_parser".format(layer=layer)
def call_one_by_one(objs, method_name: str, args, **kwargs):
+ """
+ Call specified method of given objects with given args in order.
+ """
for obj in objs:
assert hasattr(obj, method_name), \
"'{cmd}' has no method '{method}".format(
|
pddg/uroboros
|
63014842ca6938b79bead89a492ff060608885db
|
diff --git a/tests/test_command.py b/tests/test_command.py
index 05a703b..d3e2e34 100644
--- a/tests/test_command.py
+++ b/tests/test_command.py
@@ -347,7 +347,7 @@ class TestCommand(object):
argv = ["--test", "test"]
for cmd in [Cmd(), Cmd2()]:
- cmd_parser = cmd.create_default_parser()
+ cmd_parser = cmd._create_default_parser()
args = cmd_parser.parse_args(argv)
assert args.test == 'test'
|
Google style docstring
reST style docstring is not easy to read and write for me
|
0.0
|
63014842ca6938b79bead89a492ff060608885db
|
[
"tests/test_command.py::TestCommand::test_create_default_parser"
] |
[
"tests/test_command.py::TestCommand::test_build_option[command0]",
"tests/test_command.py::TestCommand::test_build_option[command1]",
"tests/test_command.py::TestCommand::test_build_option[command2]",
"tests/test_command.py::TestCommand::test_validate[command0]",
"tests/test_command.py::TestCommand::test_validate[command1]",
"tests/test_command.py::TestCommand::test_validate[command2]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set0]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set1]",
"tests/test_command.py::TestCommand::test_increment_nest[command_set2]",
"tests/test_command.py::TestCommand::test_register_parent[command_set0]",
"tests/test_command.py::TestCommand::test_register_parent[command_set1]",
"tests/test_command.py::TestCommand::test_register_parent[command_set2]",
"tests/test_command.py::TestCommand::test_add_command[command_set0]",
"tests/test_command.py::TestCommand::test_add_command[command_set1]",
"tests/test_command.py::TestCommand::test_add_command[command_set2]",
"tests/test_command.py::TestCommand::test_multiple_add_command[root_cmd0-add_commands0]",
"tests/test_command.py::TestCommand::test_multiple_add_command[root_cmd1-add_commands1]",
"tests/test_command.py::TestCommand::test_add_others",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set0]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set1]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set2]",
"tests/test_command.py::TestCommand::test_get_all_sub_commands[command_set3]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set0-argv0]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set1-argv1]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set2-argv2]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set3-argv3]",
"tests/test_command.py::TestCommand::test_get_sub_commands[command_set4-argv4]",
"tests/test_command.py::TestCommand::test_add_duplicate_command[command_set0]",
"tests/test_command.py::TestCommand::test_add_duplicate_command[command_set1]",
"tests/test_command.py::TestCommand::test_execute[command_set0-root",
"tests/test_command.py::TestCommand::test_execute[command_set1-root",
"tests/test_command.py::TestCommand::test_execute[command_set2-root-False]",
"tests/test_command.py::TestCommand::test_execute[command_set3-root",
"tests/test_command.py::TestCommand::test_execute[command_set4-root",
"tests/test_command.py::TestCommand::test_execute[command_set5-root-False]",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set0-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set1-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set2-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set3-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set4-root",
"tests/test_command.py::TestCommand::test_violates_validation_argv[command_set5-root",
"tests/test_command.py::TestCommand::test_before_validate",
"tests/test_command.py::TestCommand::test_pre_hook[commands0]",
"tests/test_command.py::TestCommand::test_pre_hook[commands1]",
"tests/test_command.py::TestCommand::test_pre_hook[commands2]",
"tests/test_command.py::TestCommand::test_after_validate",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands0]",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands1]",
"tests/test_command.py::TestCommand::test_pre_hook_validated[commands2]",
"tests/test_command.py::TestCommand::test_get_options[option_objs0]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-05 07:38:53+00:00
|
apache-2.0
| 4,488 |
|
pdm-project__pdm-1187
|
diff --git a/news/1173.bugfix.md b/news/1173.bugfix.md
new file mode 100644
index 00000000..d6271486
--- /dev/null
+++ b/news/1173.bugfix.md
@@ -0,0 +1,1 @@
+Fix a bug that requirement extras and underlying are resolved to the different version
diff --git a/pdm/models/repositories.py b/pdm/models/repositories.py
index 1565d40c..76cf7a45 100644
--- a/pdm/models/repositories.py
+++ b/pdm/models/repositories.py
@@ -78,7 +78,11 @@ class BaseRepository:
# XXX: If the requirement has extras, add the original candidate
# (without extras) as its dependency. This ensures the same package with
# different extras resolve to the same version.
- self_req = dataclasses.replace(candidate.req, extras=None, marker=None)
+ self_req = dataclasses.replace(
+ candidate.req.as_pinned_version(candidate.version),
+ extras=None,
+ marker=None,
+ )
reqs.append(self_req)
# Store the metadata on the candidate for caching
candidate.requires_python = requires_python
|
pdm-project/pdm
|
96710e62b63973dfec467c80f07d04fc51074c59
|
diff --git a/tests/resolver/test_resolve.py b/tests/resolver/test_resolve.py
index 26106963..ac2b3acb 100644
--- a/tests/resolver/test_resolve.py
+++ b/tests/resolver/test_resolve.py
@@ -311,3 +311,14 @@ def test_resolve_extra_requirements_no_break_constraints(resolve, repository):
result = resolve(["foo[chardet]<0.2.0"])
assert "chardet" in result
assert result["foo"].version == "0.1.0"
+
+
+def test_resolve_extra_and_underlying_to_the_same_version(resolve, repository):
+ repository.add_candidate("foo", "0.1.0")
+ repository.add_dependencies("foo", "0.1.0", ["chardet; extra=='enc'"])
+ repository.add_candidate("foo", "0.2.0")
+ repository.add_dependencies("foo", "0.2.0", ["chardet; extra=='enc'"])
+ repository.add_candidate("bar", "0.1.0")
+ repository.add_dependencies("bar", "0.1.0", ["foo[enc]>=0.1.0"])
+ result = resolve(["foo==0.1.0", "bar"])
+ assert result["foo"].version == result["foo[enc]"].version == "0.1.0"
|
PDM 1.15.4+ unable to update lockfile when package with extras is pinned but extras are not pinned
## Summary
When using 1.15.4 we have seen some projects with large lists of dependencies unable to lock, and PDM eventually quits when it hits the max number of rounds. I think it's due to PDM's algorithm for handling extra dependencies. See the following comment for details.
#### Background
~PDM is unable to find dependency resolution and update the lockfile when run from a docker container in CI, eventually the job times out after 1 hour. When run on a workstation the problem doesn't occur, and the lockfile is able to be successfully updated.~
_edit: I thought the issue was the CI environment but it was because our developers were using v1.15.3 on their workstations._
A) One project A, I was unable to figure out why the lock was unsuccessful but when I updated to PDM v2.0.0a it was able to successfully lock both locally and in CI.... 🤔
B) On project B, a google-cloud-functions project I was able to investigate more, but I'm still confused and think the behavior is a bug.
report about project B continued in below
|
0.0
|
96710e62b63973dfec467c80f07d04fc51074c59
|
[
"tests/resolver/test_resolve.py::test_resolve_extra_and_underlying_to_the_same_version"
] |
[
"tests/resolver/test_resolve.py::test_resolve_named_requirement",
"tests/resolver/test_resolve.py::test_resolve_requires_python",
"tests/resolver/test_resolve.py::test_resolve_allow_prereleases",
"tests/resolver/test_resolve.py::test_resolve_with_extras",
"tests/resolver/test_resolve.py::test_resolve_local_artifacts[sdist]",
"tests/resolver/test_resolve.py::test_resolve_local_artifacts[wheel]",
"tests/resolver/test_resolve.py::test_resolve_vcs_and_local_requirements[False-/root/data/temp_dir/tmpwuzqdgqr/pdm-project__pdm__0.0/tests/fixtures/projects/demo]",
"tests/resolver/test_resolve.py::test_resolve_vcs_and_local_requirements[False-git+https://github.com/test-root/demo.git#egg=demo]",
"tests/resolver/test_resolve.py::test_resolve_vcs_and_local_requirements[True-/root/data/temp_dir/tmpwuzqdgqr/pdm-project__pdm__0.0/tests/fixtures/projects/demo]",
"tests/resolver/test_resolve.py::test_resolve_vcs_and_local_requirements[True-git+https://github.com/test-root/demo.git#egg=demo]",
"tests/resolver/test_resolve.py::test_resolve_vcs_without_explicit_name",
"tests/resolver/test_resolve.py::test_resolve_local_and_named_requirement",
"tests/resolver/test_resolve.py::test_resolving_auto_avoid_conflicts",
"tests/resolver/test_resolve.py::test_resolve_conflicting_dependencies",
"tests/resolver/test_resolve.py::test_resolve_conflicting_dependencies_with_overrides[2.1]",
"tests/resolver/test_resolve.py::test_resolve_conflicting_dependencies_with_overrides[>=1.8]",
"tests/resolver/test_resolve.py::test_resolve_conflicting_dependencies_with_overrides[==2.1]",
"tests/resolver/test_resolve.py::test_resolve_no_available_versions",
"tests/resolver/test_resolve.py::test_exclude_incompatible_requirements",
"tests/resolver/test_resolve.py::test_union_markers_from_different_parents",
"tests/resolver/test_resolve.py::test_requirements_from_different_groups",
"tests/resolver/test_resolve.py::test_resolve_two_extras_from_the_same_package",
"tests/resolver/test_resolve.py::test_resolve_package_with_dummy_upbound",
"tests/resolver/test_resolve.py::test_resolve_dependency_with_extra_marker",
"tests/resolver/test_resolve.py::test_resolve_circular_dependencies",
"tests/resolver/test_resolve.py::test_resolve_candidates_to_install",
"tests/resolver/test_resolve.py::test_resolve_prefer_requirement_with_prereleases",
"tests/resolver/test_resolve.py::test_resolve_with_python_marker",
"tests/resolver/test_resolve.py::test_resolve_file_req_with_prerelease",
"tests/resolver/test_resolve.py::test_resolve_extra_requirements_no_break_constraints"
] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-30 06:20:49+00:00
|
mit
| 4,489 |
|
pdm-project__pdm-1254
|
diff --git a/news/1253.bugfix.md b/news/1253.bugfix.md
new file mode 100644
index 00000000..8e91ec5b
--- /dev/null
+++ b/news/1253.bugfix.md
@@ -0,0 +1,1 @@
+Support Conda environments when detecting the project environment.
diff --git a/news/1256.bugfix.md b/news/1256.bugfix.md
new file mode 100644
index 00000000..c111cf60
--- /dev/null
+++ b/news/1256.bugfix.md
@@ -0,0 +1,1 @@
+Stabilize sorting of URLs in `metadata.files` in `pdm.lock`.
diff --git a/pdm/cli/utils.py b/pdm/cli/utils.py
index e822f7fc..725d0c94 100644
--- a/pdm/cli/utils.py
+++ b/pdm/cli/utils.py
@@ -465,7 +465,7 @@ def format_lockfile(
continue
array = tomlkit.array().multiline(True)
for link, hash_value in sorted(
- v.hashes.items(), key=lambda l: l[0].filename
+ v.hashes.items(), key=lambda l_h: (l_h[0].url_without_fragment, l_h[1])
):
inline = make_inline_table(
{"url": link.url_without_fragment, "hash": hash_value}
diff --git a/pdm/project/core.py b/pdm/project/core.py
index 5fd54c5f..1a52a369 100644
--- a/pdm/project/core.py
+++ b/pdm/project/core.py
@@ -259,8 +259,8 @@ class Project:
if get_venv_like_prefix(self.python.executable) is not None
else Environment(self)
)
- if os.getenv("VIRTUAL_ENV"):
- venv = cast(str, os.getenv("VIRTUAL_ENV"))
+ venv = os.getenv("VIRTUAL_ENV", os.getenv("CONDA_PREFIX"))
+ if venv is not None:
self.core.ui.echo(
f"Detected inside an active virtualenv [green]{venv}[/], reuse it.",
style="yellow",
diff --git a/pdm/resolver/core.py b/pdm/resolver/core.py
index 4a6993c2..195af3bc 100644
--- a/pdm/resolver/core.py
+++ b/pdm/resolver/core.py
@@ -20,11 +20,10 @@ def resolve(
max_rounds: int = 10000,
) -> tuple[dict[str, Candidate], dict[str, list[Requirement]]]:
"""Core function to perform the actual resolve process.
- Return a tuple containing 3 items:
+ Return a tuple containing 2 items:
1. A map of pinned candidates
2. A map of resolved dependencies for each dependency group
- 3. A map of package descriptions fetched from PyPI source
"""
requirements.append(PythonRequirement.from_pyspec_set(requires_python))
provider = cast(BaseProvider, resolver.provider)
|
pdm-project/pdm
|
bfc827927cb78843dd7fa1bfa4a396351bc6c85a
|
diff --git a/tests/cli/test_lock.py b/tests/cli/test_lock.py
index 071ab9ff..80a10cf9 100644
--- a/tests/cli/test_lock.py
+++ b/tests/cli/test_lock.py
@@ -30,24 +30,23 @@ def test_lock_refresh(invoke, project, repository):
assert project.is_lockfile_hash_match()
assert not project.lockfile["metadata"]["files"].get("requests 2.19.1")
project.add_dependencies({"requests": parse_requirement("requests>=2.0")})
- repository.get_hashes = (
- lambda c: {
- Link(
- "http://example.com/requests-2.19.1-py3-none-any.whl"
- ): "sha256:abcdef123456"
- }
+ url_hashes = {
+ "http://example.com/requests-2.19.1-py3-none-any.whl": "sha256:abcdef123456",
+ "http://example2.com/requests-2.19.1-py3-none-AMD64.whl": "sha256:abcdef123456",
+ "http://example1.com/requests-2.19.1-py3-none-any.whl": "sha256:abcdef123456",
+ }
+ repository.get_hashes = lambda c: (
+ {Link(url): hash for url, hash in url_hashes.items()}
if c.identify() == "requests"
else {}
)
- print(project.lockfile)
assert not project.is_lockfile_hash_match()
result = invoke(["lock", "--refresh", "-v"], obj=project)
assert result.exit_code == 0
assert project.is_lockfile_hash_match()
- assert project.lockfile["metadata"]["files"]["requests 2.19.1"][0] == {
- "url": "http://example.com/requests-2.19.1-py3-none-any.whl",
- "hash": "sha256:abcdef123456",
- }
+ assert project.lockfile["metadata"]["files"]["requests 2.19.1"] == [
+ {"url": url, "hash": hash} for url, hash in sorted(url_hashes.items())
+ ]
def test_lock_refresh_keep_consistent(invoke, project, repository):
|
Environment detection fails for Conda environments
- [x] I have searched the issue tracker and believe that this is not a duplicate.
**Make sure you run commands with `-v` flag before pasting the output.**
## Steps to reproduce
- Enable `python.use_venv`
- Run `pdm install` in a project using an activated Conda environment
## Actual behavior
PDM tries to create a virtual environment.
## Expected behavior
PDM uses the existing Conda environment
## Environment Information
First call to `pdm info`
```bash
PDM version:
2.0.2
Python Interpreter:
/home/angus/.mambaforge/envs/texat/bin/python (3.10)
Project Root:
/home/angus/Git/texat
Project Packages:
None
{
"implementation_name": "cpython",
"implementation_version": "3.10.5",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "5.15.0-41-generic",
"platform_system": "Linux",
"platform_version": "#44-Ubuntu SMP Wed Jun 22 14:20:53 UTC 2022",
"python_full_version": "3.10.5",
"platform_python_implementation": "CPython",
"python_version": "3.10",
"sys_platform": "linux"
}
```
Second call
```bash
PDM version:
2.0.2
Python Interpreter:
/home/angus/Git/texat/.venv/bin/python (3.10)
Project Root:
/home/angus/Git/texat
Project Packages:
None
{
"implementation_name": "cpython",
"implementation_version": "3.10.5",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "5.15.0-41-generic",
"platform_system": "Linux",
"platform_version": "#44-Ubuntu SMP Wed Jun 22 14:20:53 UTC 2022",
"python_full_version": "3.10.5",
"platform_python_implementation": "CPython",
"python_version": "3.10",
"sys_platform": "linux"
}
```
|
0.0
|
bfc827927cb78843dd7fa1bfa4a396351bc6c85a
|
[
"tests/cli/test_lock.py::test_lock_refresh"
] |
[
"tests/cli/test_lock.py::test_lock_command",
"tests/cli/test_lock.py::test_lock_dependencies",
"tests/cli/test_lock.py::test_lock_refresh_keep_consistent",
"tests/cli/test_lock.py::test_innovations_with_specified_lockfile",
"tests/cli/test_lock.py::test_skip_editable_dependencies_in_metadata"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-21 09:52:51+00:00
|
mit
| 4,490 |
|
pdm-project__pdm-1257
|
diff --git a/news/1256.bugfix.md b/news/1256.bugfix.md
new file mode 100644
index 00000000..c111cf60
--- /dev/null
+++ b/news/1256.bugfix.md
@@ -0,0 +1,1 @@
+Stabilize sorting of URLs in `metadata.files` in `pdm.lock`.
diff --git a/pdm/cli/utils.py b/pdm/cli/utils.py
index e822f7fc..725d0c94 100644
--- a/pdm/cli/utils.py
+++ b/pdm/cli/utils.py
@@ -465,7 +465,7 @@ def format_lockfile(
continue
array = tomlkit.array().multiline(True)
for link, hash_value in sorted(
- v.hashes.items(), key=lambda l: l[0].filename
+ v.hashes.items(), key=lambda l_h: (l_h[0].url_without_fragment, l_h[1])
):
inline = make_inline_table(
{"url": link.url_without_fragment, "hash": hash_value}
diff --git a/pdm/resolver/core.py b/pdm/resolver/core.py
index 4a6993c2..195af3bc 100644
--- a/pdm/resolver/core.py
+++ b/pdm/resolver/core.py
@@ -20,11 +20,10 @@ def resolve(
max_rounds: int = 10000,
) -> tuple[dict[str, Candidate], dict[str, list[Requirement]]]:
"""Core function to perform the actual resolve process.
- Return a tuple containing 3 items:
+ Return a tuple containing 2 items:
1. A map of pinned candidates
2. A map of resolved dependencies for each dependency group
- 3. A map of package descriptions fetched from PyPI source
"""
requirements.append(PythonRequirement.from_pyspec_set(requires_python))
provider = cast(BaseProvider, resolver.provider)
|
pdm-project/pdm
|
bfc827927cb78843dd7fa1bfa4a396351bc6c85a
|
diff --git a/tests/cli/test_lock.py b/tests/cli/test_lock.py
index 071ab9ff..80a10cf9 100644
--- a/tests/cli/test_lock.py
+++ b/tests/cli/test_lock.py
@@ -30,24 +30,23 @@ def test_lock_refresh(invoke, project, repository):
assert project.is_lockfile_hash_match()
assert not project.lockfile["metadata"]["files"].get("requests 2.19.1")
project.add_dependencies({"requests": parse_requirement("requests>=2.0")})
- repository.get_hashes = (
- lambda c: {
- Link(
- "http://example.com/requests-2.19.1-py3-none-any.whl"
- ): "sha256:abcdef123456"
- }
+ url_hashes = {
+ "http://example.com/requests-2.19.1-py3-none-any.whl": "sha256:abcdef123456",
+ "http://example2.com/requests-2.19.1-py3-none-AMD64.whl": "sha256:abcdef123456",
+ "http://example1.com/requests-2.19.1-py3-none-any.whl": "sha256:abcdef123456",
+ }
+ repository.get_hashes = lambda c: (
+ {Link(url): hash for url, hash in url_hashes.items()}
if c.identify() == "requests"
else {}
)
- print(project.lockfile)
assert not project.is_lockfile_hash_match()
result = invoke(["lock", "--refresh", "-v"], obj=project)
assert result.exit_code == 0
assert project.is_lockfile_hash_match()
- assert project.lockfile["metadata"]["files"]["requests 2.19.1"][0] == {
- "url": "http://example.com/requests-2.19.1-py3-none-any.whl",
- "hash": "sha256:abcdef123456",
- }
+ assert project.lockfile["metadata"]["files"]["requests 2.19.1"] == [
+ {"url": url, "hash": hash} for url, hash in sorted(url_hashes.items())
+ ]
def test_lock_refresh_keep_consistent(invoke, project, repository):
|
metadata.files sorting in pdm.lock changes between computers
## Steps to reproduce
* Add several package sources which contain the same wheels with different hashes (for example https://piwheels.org for deploying to raspberry pi)
* run `pdm lock` on different computers (linux vs windows)
## Actual behavior
The following diff is observed between the two computers
```diff
"uritemplate 3.0.1" = [
- {url = "https://files.pythonhosted.org/packages/bf/0c/60d82c077998feb631608dca3cc1fe19ac074e772bf0c24cf409b977b815/uritemplate-3.0.1-py2.py3-none-any.whl", hash = "sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f"},
{url = "https://www.piwheels.org/simple/uritemplate/uritemplate-3.0.1-py2.py3-none-any.whl", hash = "sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f"},
+ {url = "https://files.pythonhosted.org/packages/bf/0c/60d82c077998feb631608dca3cc1fe19ac074e772bf0c24cf409b977b815/uritemplate-3.0.1-py2.py3-none-any.whl", hash = "sha256:07620c3f3f8eed1f12600845892b0e036a2420acf513c53f7de0abd911a5894f"},
{url = "https://files.pythonhosted.org/packages/42/da/fa9aca2d866f932f17703b3b5edb7b17114bb261122b6e535ef0d9f618f8/uritemplate-3.0.1.tar.gz", hash = "sha256:5af8ad10cec94f215e3f48112de2022e1d5a37ed427fbd88652fa908f2ab7cae"},
]
```
## Expected behavior
pdm.lock should be the same no matter which computer last locked it.
## Environment Information
Debian docker image with PDM 2.0.2 and WIndows 10 workstation with PDM 2.0.2
|
0.0
|
bfc827927cb78843dd7fa1bfa4a396351bc6c85a
|
[
"tests/cli/test_lock.py::test_lock_refresh"
] |
[
"tests/cli/test_lock.py::test_lock_command",
"tests/cli/test_lock.py::test_lock_dependencies",
"tests/cli/test_lock.py::test_lock_refresh_keep_consistent",
"tests/cli/test_lock.py::test_innovations_with_specified_lockfile",
"tests/cli/test_lock.py::test_skip_editable_dependencies_in_metadata"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-21 21:14:13+00:00
|
mit
| 4,491 |
|
pdm-project__pdm-1421
|
diff --git a/news/1412.feature.md b/news/1412.feature.md
new file mode 100644
index 00000000..dccc1444
--- /dev/null
+++ b/news/1412.feature.md
@@ -0,0 +1,1 @@
+Allow `pdm init` to receive a Python path or version via `--python` option.
diff --git a/news/1418.bugfix.md b/news/1418.bugfix.md
new file mode 100644
index 00000000..46f6daca
--- /dev/null
+++ b/news/1418.bugfix.md
@@ -0,0 +1,1 @@
+Show the Python path, instead of the real executable, in the Python selection menu.
diff --git a/src/pdm/cli/actions.py b/src/pdm/cli/actions.py
index ebd39cfd..8931e5ec 100644
--- a/src/pdm/cli/actions.py
+++ b/src/pdm/cli/actions.py
@@ -658,7 +658,7 @@ def do_use(
if not matching_interpreters:
project.core.ui.echo("Interpreters found but not matching:", err=True)
for py in found_interpreters:
- project.core.ui.echo(f" - {py.executable} ({py.identifier})", err=True)
+ project.core.ui.echo(f" - {py.path} ({py.identifier})", err=True)
raise NoPythonVersion(
"No python is found meeting the requirement "
f"[green]python {str(project.python_requires)}[/]"
@@ -669,7 +669,7 @@ def do_use(
project.core.ui.echo("Please enter the Python interpreter to use")
for i, py_version in enumerate(matching_interpreters):
project.core.ui.echo(
- f"{i}. [green]{str(py_version.executable)}[/] "
+ f"{i}. [green]{str(py_version.path)}[/] "
f"({py_version.identifier})"
)
selection = termui.ask(
@@ -684,7 +684,7 @@ def do_use(
use_cache.set(python, selected_python.path.as_posix())
if not selected_python.valid:
- path = str(selected_python.executable)
+ path = str(selected_python.path)
raise InvalidPyVersion(f"Invalid Python interpreter: {path}")
if not save:
return selected_python
@@ -695,13 +695,13 @@ def do_use(
)
project.core.ui.echo(
"Using Python interpreter: "
- f"[green]{str(selected_python.executable)}[/] "
+ f"[green]{str(selected_python.path)}[/] "
f"({selected_python.identifier})"
)
project.python = selected_python
if (
old_python
- and old_python.path != selected_python.path
+ and old_python.executable != selected_python.executable
and not project.environment.is_global
):
project.core.ui.echo("Updating executable scripts...", style="cyan")
diff --git a/src/pdm/cli/commands/init.py b/src/pdm/cli/commands/init.py
index ab4304b3..aa42e1de 100644
--- a/src/pdm/cli/commands/init.py
+++ b/src/pdm/cli/commands/init.py
@@ -33,6 +33,7 @@ class Command(BaseCommand):
action="store_true",
help="Don't ask questions but use default values",
)
+ parser.add_argument("--python", help="Specify the Python version/path to use")
parser.set_defaults(search_parent=False)
def handle(self, project: Project, options: argparse.Namespace) -> None:
@@ -48,7 +49,13 @@ class Command(BaseCommand):
self.set_interactive(not options.non_interactive)
if self.interactive:
- python = actions.do_use(project, ignore_requires_python=True, hooks=hooks)
+ python = actions.do_use(
+ project,
+ options.python or "",
+ first=bool(options.python),
+ ignore_requires_python=True,
+ hooks=hooks,
+ )
if (
project.config["python.use_venv"]
and get_venv_like_prefix(python.executable) is None
@@ -72,7 +79,12 @@ class Command(BaseCommand):
)
else:
python = actions.do_use(
- project, "3", True, ignore_requires_python=True, save=False, hooks=hooks
+ project,
+ options.python or "3",
+ True,
+ ignore_requires_python=True,
+ save=False,
+ hooks=hooks,
)
if get_venv_like_prefix(python.executable) is None:
project.core.ui.echo(
|
pdm-project/pdm
|
609d1e9c5240aa07b9f8b032faf9164ef39cc3be
|
diff --git a/tests/cli/test_init.py b/tests/cli/test_init.py
index 4de793b5..dbfce5ce 100644
--- a/tests/cli/test_init.py
+++ b/tests/cli/test_init.py
@@ -6,6 +6,8 @@ import pytest
from pdm.cli import actions
from pdm.models.python import PythonInfo
+PYTHON_VERSION = f"{sys.version_info[0]}.{sys.version_info[1]}"
+
def test_init_validate_python_requires(project_no_init):
with pytest.raises(ValueError):
@@ -103,6 +105,21 @@ def test_init_auto_create_venv(project_no_init, invoke, mocker):
)
+def test_init_auto_create_venv_specify_python(project_no_init, invoke, mocker):
+ mocker.patch("pdm.cli.commands.init.get_venv_like_prefix", return_value=None)
+ project_no_init.project_config["python.use_venv"] = True
+ result = invoke(
+ ["init", f"--python={PYTHON_VERSION}"],
+ input="\n\n\n\n\n\n",
+ obj=project_no_init,
+ )
+ assert result.exit_code == 0
+ assert (
+ project_no_init.python.executable.parent.parent
+ == project_no_init.root / ".venv"
+ )
+
+
def test_init_auto_create_venv_answer_no(project_no_init, invoke, mocker):
mocker.patch("pdm.cli.commands.init.get_venv_like_prefix", return_value=None)
creator = mocker.patch("pdm.cli.commands.venv.backends.Backend.create")
|
Allow PDM init to receive a Python version
## Is your feature request related to a problem? Please describe.
Yes. It is impossible to create a new project for a specific python version without interaction.
If I use `pdm ini --non-interactive` then I cannot change version using CLI, because it is not compatible with `requires-python` setting that was automatically selected.
## Describe the solution you'd like
A solution would be to allow the init command to receive the version of python that I want to use:
```bash
pdm init 3.8.10
```
Or allow the user to force change it later:
```bash
pdm use 3.8.10 --force
```
|
0.0
|
609d1e9c5240aa07b9f8b032faf9164ef39cc3be
|
[
"tests/cli/test_init.py::test_init_auto_create_venv_specify_python"
] |
[
"tests/cli/test_init.py::test_init_validate_python_requires",
"tests/cli/test_init.py::test_init_command",
"tests/cli/test_init.py::test_init_command_library",
"tests/cli/test_init.py::test_init_non_interactive",
"tests/cli/test_init.py::test_init_auto_create_venv",
"tests/cli/test_init.py::test_init_auto_create_venv_answer_no"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-07 06:29:29+00:00
|
mit
| 4,492 |
|
pdm-project__pdm-1518
|
diff --git a/news/1516.bugfix.md b/news/1516.bugfix.md
new file mode 100644
index 00000000..aeb4b7cb
--- /dev/null
+++ b/news/1516.bugfix.md
@@ -0,0 +1,1 @@
+Fix the test failure with the latest `findpython` installed.
diff --git a/pdm.lock b/pdm.lock
index 11487e8f..0e7aae6b 100644
--- a/pdm.lock
+++ b/pdm.lock
@@ -124,7 +124,7 @@ summary = "A platform independent file lock."
[[package]]
name = "findpython"
-version = "0.2.0"
+version = "0.2.2"
requires_python = ">=3.7"
summary = "A utility to find python versions on your system"
dependencies = [
@@ -783,9 +783,9 @@ content_hash = "sha256:8254688e0619bf5e694bcd7ac7033d800b93c1764234994273a0bc459
{url = "https://files.pythonhosted.org/packages/a6/d5/17f02b379525d1ff9678bfa58eb9548f561c8826deb0b85797aa0eed582d/filelock-3.7.1-py3-none-any.whl", hash = "sha256:37def7b658813cda163b56fc564cdc75e86d338246458c4c28ae84cabefa2404"},
{url = "https://files.pythonhosted.org/packages/f3/c7/5c1aef87f1197d2134a096c0264890969213c9cbfb8a4102087e8d758b5c/filelock-3.7.1.tar.gz", hash = "sha256:3a0fd85166ad9dbab54c9aec96737b744106dc5f15c0b09a6744a445299fcf04"},
]
-"findpython 0.2.0" = [
- {url = "https://files.pythonhosted.org/packages/17/16/7e7e7f6ae157e903c3285533f5d52f34fa8f25c8f4745699294b154e83fc/findpython-0.2.0.tar.gz", hash = "sha256:c2099ee0b71fc2714b64f68fd1f40bc0ee47f49dfe9547fb64d7cbcc02fe0871"},
- {url = "https://files.pythonhosted.org/packages/6f/a5/538cce025f4eae09f138c4fcab5eb3c8fe4584006b0c9759e2554a274efc/findpython-0.2.0-py3-none-any.whl", hash = "sha256:110ec222a43aca3fcd154fd90b911f465c70e86787ae0532bab2266a95870fc9"},
+"findpython 0.2.2" = [
+ {url = "https://files.pythonhosted.org/packages/19/b4/6c35b7e741c18fb6d5be35e2d9099dc1a05a8525b45ebbb6985d101f2b62/findpython-0.2.2-py3-none-any.whl", hash = "sha256:62717187e728c3d38b2754dfd7e6b5cfa503ed3608b9ad93cb80dbf2497e92e0"},
+ {url = "https://files.pythonhosted.org/packages/28/96/ec16612c4384cfca9381239d06e9285ca41d749d4a5003df73e3a96255e7/findpython-0.2.2.tar.gz", hash = "sha256:80557961c04cf1c8c4ba4ca3ac7cf76ec27fa92788a6af42cb701e3450c49430"},
]
"ghp-import 2.1.0" = [
{url = "https://files.pythonhosted.org/packages/d9/29/d40217cbe2f6b1359e00c6c307bb3fc876ba74068cbab3dde77f03ca0dc4/ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"},
diff --git a/src/pdm/cli/actions.py b/src/pdm/cli/actions.py
index 3be0a325..0e7f0b09 100644
--- a/src/pdm/cli/actions.py
+++ b/src/pdm/cli/actions.py
@@ -31,12 +31,7 @@ from pdm.cli.utils import (
set_env_in_reg,
translate_groups,
)
-from pdm.exceptions import (
- InvalidPyVersion,
- NoPythonVersion,
- PdmUsageError,
- ProjectError,
-)
+from pdm.exceptions import NoPythonVersion, PdmUsageError, ProjectError
from pdm.formats import FORMATS
from pdm.formats.base import array_of_inline_tables, make_array, make_inline_table
from pdm.models.backends import BuildBackend
@@ -579,8 +574,10 @@ def do_use(
python = python.strip()
def version_matcher(py_version: PythonInfo) -> bool:
- return ignore_requires_python or project.python_requires.contains(
- str(py_version.version), True
+ return (
+ ignore_requires_python
+ or py_version.valid
+ and project.python_requires.contains(str(py_version.version), True)
)
if not project.cache_dir.exists():
@@ -642,9 +639,6 @@ def do_use(
if python:
use_cache.set(python, selected_python.path.as_posix())
- if not selected_python.valid:
- path = str(selected_python.path)
- raise InvalidPyVersion(f"Invalid Python interpreter: {path}")
if not save:
return selected_python
old_python = (
diff --git a/src/pdm/models/python.py b/src/pdm/models/python.py
index 33802b72..e15146b6 100644
--- a/src/pdm/models/python.py
+++ b/src/pdm/models/python.py
@@ -5,7 +5,7 @@ from pathlib import Path
from typing import Any
from findpython import PythonVersion
-from packaging.version import Version
+from packaging.version import InvalidVersion, Version
from pdm.compat import cached_property
@@ -72,6 +72,9 @@ class PythonInfo:
@property
def identifier(self) -> str:
- if os.name == "nt" and self.is_32bit:
- return f"{self.major}.{self.minor}-32"
- return f"{self.major}.{self.minor}"
+ try:
+ if os.name == "nt" and self.is_32bit:
+ return f"{self.major}.{self.minor}-32"
+ return f"{self.major}.{self.minor}"
+ except InvalidVersion:
+ return "unknown"
|
pdm-project/pdm
|
26f2a178e378442f9aaa7785a583f9f3c9e98a5b
|
diff --git a/tests/cli/test_use.py b/tests/cli/test_use.py
index ca1c5731..e65cac76 100644
--- a/tests/cli/test_use.py
+++ b/tests/cli/test_use.py
@@ -6,7 +6,7 @@ from pathlib import Path
import pytest
from pdm.cli import actions
-from pdm.exceptions import InvalidPyVersion
+from pdm.exceptions import NoPythonVersion
from pdm.models.caches import JSONFileCache
@@ -54,7 +54,7 @@ echo hello
shim_path = project.root.joinpath("python_shim.sh")
shim_path.write_text(wrapper_script)
shim_path.chmod(0o755)
- with pytest.raises(InvalidPyVersion):
+ with pytest.raises(NoPythonVersion):
actions.do_use(project, shim_path.as_posix())
|
test_use_invalid_wrapper_python test fails
While packaging pdm 2.2.1 for openSUSE/Factory we have discovered failing test test_use_invalid_wrapper_python:
- [x] I have searched the issue tracker and believe that this is not a duplicate.
**Make sure you run commands with `-v` flag before pasting the output.**
## Steps to reproduce
Run the test suite:
```
[ 198s] =================================== FAILURES ===================================
[ 198s] _______________________ test_use_invalid_wrapper_python ________________________
[ 198s]
[ 198s] project = <Project '/tmp/pytest-of-abuild/pytest-0/test_use_invalid_wrapper_pytho0'>
[ 198s]
[ 198s] @pytest.mark.skipif(os.name != "posix", reason="Run on POSIX platforms only")
[ 198s] def test_use_invalid_wrapper_python(project):
[ 198s] wrapper_script = """#!/bin/bash
[ 198s] echo hello
[ 198s] """
[ 198s] shim_path = project.root.joinpath("python_shim.sh")
[ 198s] shim_path.write_text(wrapper_script)
[ 198s] shim_path.chmod(0o755)
[ 198s] with pytest.raises(InvalidPyVersion):
[ 198s] > actions.do_use(project, shim_path.as_posix())
[ 198s]
[ 198s] tests/cli/test_use.py:60:
[ 198s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 198s] /usr/lib/python3.8/site-packages/pdm/cli/actions.py:617: in do_use
[ 198s] matching_interpreters = list(filter(version_matcher, found_interpreters))
[ 198s] /usr/lib/python3.8/site-packages/pdm/cli/actions.py:588: in version_matcher
[ 198s] str(py_version.version), True
[ 198s] /usr/lib/python3.8/site-packages/pdm/models/python.py:48: in version
[ 198s] return self._py_ver.version
[ 198s] /usr/lib/python3.8/site-packages/findpython/python.py:65: in version
[ 198s] self._version = self._get_version()
[ 198s] /usr/lib/python3.8/site-packages/findpython/python.py:165: in _get_version
[ 198s] return Version(version)
[ 198s] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
[ 198s]
[ 198s] self = <[AttributeError("'Version' object has no attribute '_version'") raised in repr()] Version object at 0x7ff1cd2fbd00>
[ 198s] version = 'hello'
[ 198s]
[ 198s] def __init__(self, version: str) -> None:
[ 198s]
[ 198s] # Validate the version and parse it into pieces
[ 198s] match = self._regex.search(version)
[ 198s] if not match:
[ 198s] > raise InvalidVersion(f"Invalid version: '{version}'")
[ 198s] E packaging.version.InvalidVersion: Invalid version: 'hello'
[ 198s]
[ 198s] /usr/lib/python3.8/site-packages/packaging/version.py:266: InvalidVersion
[ 198s] ---------------------------- Captured stdout setup -----------------------------
[ 198s] Changes are written to pyproject.toml.
[ 198s] =============================== warnings summary ===============================
[ 198s] tests/test_integration.py:31
[ 198s] /home/abuild/rpmbuild/BUILD/pdm-2.2.1/tests/test_integration.py:31: PytestUnknownMarkWarning: Unknown pytest.mark.flaky - is this a typo? You can register custom marks to avoid this warning - for details, see https://docs.pytest.org/en/stable/how-to/mark.html
[ 198s] @pytest.mark.flaky(reruns=3)
[ 198s]
[ 198s] -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
[ 198s] =========================== short test summary info ============================
[ 198s] FAILED tests/cli/test_use.py::test_use_invalid_wrapper_python - packaging.ver...
[ 198s] ===== 1 failed, 585 passed, 12 deselected, 1 warning in 184.12s (0:03:04) ======
```
## Actual behavior
Test fails
## Expected behavior
Whole test suite passes.
## Environment Information
```bash
# Paste the output of `pdm info && pdm info --env` below:
stitny/t/pdm-test$ pdm info
PDM version:
2.2.1
Python Interpreter:
/tmp/pdm-test/.venv/bin/python (3.10)
Project Root:
/tmp/pdm-test
Project Packages:
None
stitny/t/pdm-test$ pdm info --env
{
"implementation_name": "cpython",
"implementation_version": "3.10.8",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "6.0.8-1-default",
"platform_system": "Linux",
"platform_version": "#1 SMP PREEMPT_DYNAMIC Fri Nov 11 08:02:50 UTC 2022 (1579d93)",
"python_full_version": "3.10.8",
"platform_python_implementation": "CPython",
"python_version": "3.10",
"sys_platform": "linux"
}
stitny/t/pdm-test$
```
Complete [build log](https://github.com/pdm-project/pdm/files/10015455/_log-test.txt) for version of packages used and steps taken.
|
0.0
|
26f2a178e378442f9aaa7785a583f9f3c9e98a5b
|
[
"tests/cli/test_use.py::test_use_invalid_wrapper_python"
] |
[
"tests/cli/test_use.py::test_use_command",
"tests/cli/test_use.py::test_use_python_by_version",
"tests/cli/test_use.py::test_use_wrapper_python",
"tests/cli/test_use.py::test_use_remember_last_selection"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-16 01:25:57+00:00
|
mit
| 4,493 |
|
pdm-project__pdm-1533
|
diff --git a/docs/docs/usage/scripts.md b/docs/docs/usage/scripts.md
index 4dd61794..42e66639 100644
--- a/docs/docs/usage/scripts.md
+++ b/docs/docs/usage/scripts.md
@@ -183,6 +183,66 @@ migrate_db = "flask db upgrade"
Besides, inside the tasks, `PDM_PROJECT_ROOT` environment variable will be set to the project root.
+### Arguments placeholder
+
+By default, all user provided extra arguments are simply appended to the command (or to all the commands for `composite` tasks).
+
+If you want more control over the user provided extra arguments, you can use the `{args}` placeholder.
+It is available for all script types and will be interpolated properly for each:
+
+```toml
+[tool.pdm.scripts]
+cmd = "echo '--before {args} --after'"
+shell = {shell = "echo '--before {args} --after'"}
+composite = {composite = ["cmd --something", "shell {args}"]}
+```
+
+will produce the following interpolations (those are not real scripts, just here to illustrate the interpolation):
+
+```shell
+$ pdm run cmd --user --provided
+--before --user --provided --after
+$ pdm run cmd
+--before --after
+$ pdm run shell --user --provided
+--before --user --provided --after
+$ pdm run shell
+--before --after
+$ pdm run composite --user --provided
+cmd --something
+shell --before --user --provided --after
+$ pdm run composite
+cmd --something
+shell --before --after
+```
+
+You may optionally provide default values that will be used if no user arguments are provided:
+
+```toml
+[tool.pdm.scripts]
+test = "echo '--before {args:--default --value} --after'"
+```
+
+will produce the following:
+
+```shell
+$ pdm run test --user --provided
+--before --user --provided --after
+$ pdm run test
+--before --default --value --after
+```
+
+!!! note
+ As soon a placeholder is detected, arguments are not appended anymore.
+ This is important for `composite` scripts because if a placeholder
+ is detected on one of the subtasks, none for the subtasks will have
+ the arguments appended, you need to explicitly pass the placeholder
+ to every nested command requiring it.
+
+!!! note
+ `call` scripts don't support the `{args}` placeholder as they have
+ access to `sys.argv` directly to handle such complexe cases and more.
+
## Show the List of Scripts
Use `pdm run --list/-l` to show the list of available script shortcuts:
diff --git a/news/1507.feature.md b/news/1507.feature.md
new file mode 100644
index 00000000..b02d5589
--- /dev/null
+++ b/news/1507.feature.md
@@ -0,0 +1,1 @@
+Allows specifying the insertion position of user provided arguments in scripts with the `{args[:default]}` placeholder.
diff --git a/src/pdm/cli/commands/run.py b/src/pdm/cli/commands/run.py
index c169dce7..f6708a35 100644
--- a/src/pdm/cli/commands/run.py
+++ b/src/pdm/cli/commands/run.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import argparse
+import itertools
import os
import re
import shlex
@@ -8,7 +9,7 @@ import signal
import subprocess
import sys
from types import FrameType
-from typing import Any, Callable, Mapping, NamedTuple, Sequence, cast
+from typing import Any, Callable, Iterator, Mapping, NamedTuple, Sequence, cast
from pdm import signals, termui
from pdm.cli.actions import PEP582_PATH
@@ -46,6 +47,20 @@ def exec_opts(*options: TaskOptions | None) -> dict[str, Any]:
)
+RE_ARGS_PLACEHOLDER = re.compile(r"{args(?::(?P<default>[^}]*))?}")
+
+
+def interpolate(script: str, args: Sequence[str]) -> tuple[str, bool]:
+ """Interpolate the `{args:[defaults]} placeholder in a string"""
+
+ def replace(m: re.Match[str]) -> str:
+ default = m.group("default") or ""
+ return " ".join(args) if args else default
+
+ interpolated, count = RE_ARGS_PLACEHOLDER.subn(replace, script)
+ return interpolated, count > 0
+
+
class Task(NamedTuple):
kind: str
name: str
@@ -216,12 +231,24 @@ class TaskRunner:
kind, _, value, options = task
shell = False
if kind == "cmd":
- if not isinstance(value, list):
- value = shlex.split(str(value))
- args = value + list(args)
+ if isinstance(value, str):
+ cmd, interpolated = interpolate(value, args)
+ value = shlex.split(cmd)
+ else:
+ agg = [interpolate(part, args) for part in value]
+ interpolated = any(row[1] for row in agg)
+ # In case of multiple default, we need to split the resulting string.
+ parts: Iterator[list[str]] = (
+ shlex.split(part) if interpolated else [part]
+ for part, interpolated in agg
+ )
+ # We flatten the nested list to obtain a list of arguments
+ value = list(itertools.chain(*parts))
+ args = value if interpolated else [*value, *args]
elif kind == "shell":
assert isinstance(value, str)
- args = " ".join([value] + list(args)) # type: ignore
+ script, interpolated = interpolate(value, args)
+ args = script if interpolated else " ".join([script, *args])
shell = True
elif kind == "call":
assert isinstance(value, str)
@@ -241,7 +268,6 @@ class TaskRunner:
] + list(args)
elif kind == "composite":
assert isinstance(value, list)
- args = list(args)
self.project.core.ui.echo(
f"Running {task}: [success]{str(args)}[/]",
@@ -249,10 +275,16 @@ class TaskRunner:
verbosity=termui.Verbosity.DETAIL,
)
if kind == "composite":
+ args = list(args)
+ should_interpolate = any(
+ (RE_ARGS_PLACEHOLDER.search(script) for script in value)
+ )
for script in value:
+ if should_interpolate:
+ script, _ = interpolate(script, args)
split = shlex.split(script)
cmd = split[0]
- subargs = split[1:] + args # type: ignore
+ subargs = split[1:] + ([] if should_interpolate else args)
code = self.run(cmd, subargs, options)
if code != 0:
return code
|
pdm-project/pdm
|
378a170f3bafcd474da505c3b768888e25c34898
|
diff --git a/tests/cli/test_run.py b/tests/cli/test_run.py
index bf86c192..5385307b 100644
--- a/tests/cli/test_run.py
+++ b/tests/cli/test_run.py
@@ -144,6 +144,50 @@ def test_run_shell_script(project, invoke):
assert (project.root / "output.txt").read_text().strip() == "hello"
[email protected](
+ "args,expected",
+ (
+ pytest.param(["hello"], "ok hello", id="with-args"),
+ pytest.param([], "ok", id="without-args"),
+ ),
+)
+def test_run_shell_script_with_args_placeholder(project, invoke, args, expected):
+ project.pyproject.settings["scripts"] = {
+ "test_script": {
+ "shell": "echo ok {args} > output.txt",
+ "help": "test it won't fail",
+ }
+ }
+ project.pyproject.write()
+ with cd(project.root):
+ result = invoke(["run", "test_script", *args], obj=project)
+ assert result.exit_code == 0
+ assert (project.root / "output.txt").read_text().strip() == expected
+
+
[email protected](
+ "args,expected",
+ (
+ pytest.param(["hello"], "hello", id="with-args"),
+ pytest.param([], "default", id="with-default"),
+ ),
+)
+def test_run_shell_script_with_args_placeholder_with_default(
+ project, invoke, args, expected
+):
+ project.pyproject.settings["scripts"] = {
+ "test_script": {
+ "shell": "echo {args:default} > output.txt",
+ "help": "test it won't fail",
+ }
+ }
+ project.pyproject.write()
+ with cd(project.root):
+ result = invoke(["run", "test_script", *args], obj=project)
+ assert result.exit_code == 0
+ assert (project.root / "output.txt").read_text().strip() == expected
+
+
def test_run_call_script(project, invoke):
(project.root / "test_script.py").write_text(
textwrap.dedent(
@@ -189,6 +233,74 @@ def test_run_script_with_extra_args(project, invoke, capfd):
assert out.splitlines()[-3:] == ["-a", "-b", "-c"]
[email protected](
+ "args,expected",
+ (
+ pytest.param(["-a", "-b", "-c"], ["-a", "-b", "-c", "-x"], id="with-args"),
+ pytest.param([], ["-x"], id="without-args"),
+ ),
+)
[email protected](
+ "script",
+ (
+ pytest.param("python test_script.py {args} -x", id="as-str"),
+ pytest.param(["python", "test_script.py", "{args}", "-x"], id="as-list"),
+ ),
+)
+def test_run_script_with_args_placeholder(
+ project, invoke, capfd, script, args, expected
+):
+ (project.root / "test_script.py").write_text(
+ textwrap.dedent(
+ """
+ import sys
+ print(*sys.argv[1:], sep='\\n')
+ """
+ )
+ )
+ project.pyproject.settings["scripts"] = {"test_script": script}
+ project.pyproject.write()
+ with cd(project.root):
+ invoke(["run", "-v", "test_script", *args], obj=project)
+ out, _ = capfd.readouterr()
+ assert out.strip().splitlines()[1:] == expected
+
+
[email protected](
+ "args,expected",
+ (
+ pytest.param(["-a", "-b", "-c"], ["-a", "-b", "-c", "-x"], id="with-args"),
+ pytest.param([], ["--default", "--value", "-x"], id="default"),
+ ),
+)
[email protected](
+ "script",
+ (
+ pytest.param("python test_script.py {args:--default --value} -x", id="as-str"),
+ pytest.param(
+ ["python", "test_script.py", "{args:--default --value}", "-x"], id="as-list"
+ ),
+ ),
+)
+def test_run_script_with_args_placeholder_with_default(
+ project, invoke, capfd, script, args, expected
+):
+ (project.root / "test_script.py").write_text(
+ textwrap.dedent(
+ """
+ import sys
+ print(*sys.argv[1:], sep='\\n')
+ """
+ )
+ )
+ project.pyproject.settings["scripts"] = {"test_script": script}
+ project.pyproject.write()
+ with cd(project.root):
+ invoke(["run", "-v", "test_script", *args], obj=project)
+ out, _ = capfd.readouterr()
+ assert out.strip().splitlines()[1:] == expected
+
+
def test_run_expand_env_vars(project, invoke, capfd, monkeypatch):
(project.root / "test_script.py").write_text("import os; print(os.getenv('FOO'))")
project.pyproject.settings["scripts"] = {
@@ -488,6 +600,52 @@ def test_composite_can_pass_parameters(project, invoke, capfd, _args):
assert "Post-Test CALLED" in out
[email protected](
+ "args,expected",
+ (
+ pytest.param(["-a"], "-a, ", id="with-args"),
+ pytest.param([], "", id="without-args"),
+ ),
+)
+def test_composite_only_pass_parameters_to_subtasks_with_args(
+ project, invoke, capfd, _args, args, expected
+):
+ project.pyproject.settings["scripts"] = {
+ "test": {"composite": ["first", "second {args} key=value"]},
+ "first": "python args.py First",
+ "second": "python args.py Second",
+ }
+ project.pyproject.write()
+ capfd.readouterr()
+ invoke(["run", "-v", "test", *args], strict=True, obj=project)
+ out, _ = capfd.readouterr()
+ assert "First CALLED" in out
+ assert f"Second CALLED with {expected}key=value" in out
+
+
[email protected](
+ "args,expected",
+ (
+ pytest.param(["-a"], "-a", id="with-args"),
+ pytest.param([], "--default", id="default"),
+ ),
+)
+def test_composite_only_pass_parameters_to_subtasks_with_args_with_default(
+ project, invoke, capfd, _args, args, expected
+):
+ project.pyproject.settings["scripts"] = {
+ "test": {"composite": ["first", "second {args:--default} key=value"]},
+ "first": "python args.py First",
+ "second": "python args.py Second",
+ }
+ project.pyproject.write()
+ capfd.readouterr()
+ invoke(["run", "-v", "test", *args], strict=True, obj=project)
+ out, _ = capfd.readouterr()
+ assert "First CALLED" in out
+ assert f"Second CALLED with {expected}, key=value" in out
+
+
def test_composite_hooks_inherit_env(project, invoke, capfd, _echo):
project.pyproject.settings["scripts"] = {
"pre_task": {"cmd": "python echo.py Pre-Task VAR", "env": {"VAR": "42"}},
|
Allows Specifying the Insertion Position of Extra Arguments Supplied to `pdm run`
## Is your feature request related to a problem? Please describe.
I'm using PDM scripts to both run commands in PDM environment, and also as a shorthand for providing common parameters.
For example, in a Django project, it's common to run commands with `manage.py`. I will use script like
```toml
manage = { cmd = [
'python', 'manage.py',
] }
```
So that I can use command `pdm manage migrate`, instead of `pdm run python manage.py migrate`. Where the common prefix `python manage.py ...` can be reused.
However, I found that one of my tools (specifically, [`pre-commit`](https://pre-commit.com/)) has commands and arguments in this format:
```shell
pre-commit install --config somewhere/config.yml
pre-commit autoupdate --config somewhere/config.yml
pre-commit run --config somewhere/config.yml
```
The parameters like `--config` must come after commands like `install`. If I create script
```toml
pre-commit = { cmd = [
'pre-commit', '--config', 'somewhere/config.yml',
] }
```
and execute `pdm run pre-commit install`, the additional argument `install` will always be appended to the `cmd` and resulting
```
pre-commit --config somewhere/config.yml install
```
which is invalid for `pre-commit`.
Since the common parts (`pre-commit` and `--config somewhere/config.yml`) of the commands are not a prefix, there is no way for me to create shorthand PDM scripts for them.
**It would be great if we could specify the insertion position of extra arguments supplied to `pdm run`.**
## Describe the solution you'd like
We can provide an optional placeholder to the `cmd`. e.g.:
```toml
pre-commit = { cmd = [
'pre-commit', '$args', '--config', 'somewhere/config.yml',
] }
```
|
0.0
|
378a170f3bafcd474da505c3b768888e25c34898
|
[
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder[with-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder[without-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder_with_default[with-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder_with_default[with-default]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-str-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-str-without-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-list-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-list-without-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-str-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-str-default]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-list-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-list-default]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args[with-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args[without-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args_with_default[with-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args_with_default[default]"
] |
[
"tests/cli/test_run.py::test_auto_isolate_site_packages",
"tests/cli/test_run.py::test_run_with_site_packages",
"tests/cli/test_run.py::test_run_command_not_found",
"tests/cli/test_run.py::test_run_pass_exit_code",
"tests/cli/test_run.py::test_run_cmd_script",
"tests/cli/test_run.py::test_run_cmd_script_with_array",
"tests/cli/test_run.py::test_run_script_pass_project_root",
"tests/cli/test_run.py::test_run_shell_script",
"tests/cli/test_run.py::test_run_call_script",
"tests/cli/test_run.py::test_run_script_with_extra_args",
"tests/cli/test_run.py::test_run_expand_env_vars",
"tests/cli/test_run.py::test_run_script_with_env_defined",
"tests/cli/test_run.py::test_run_script_with_dotenv_file",
"tests/cli/test_run.py::test_run_script_override_global_env",
"tests/cli/test_run.py::test_run_show_list_of_scripts",
"tests/cli/test_run.py::test_import_another_sitecustomize",
"tests/cli/test_run.py::test_run_with_patched_sysconfig",
"tests/cli/test_run.py::test_run_composite",
"tests/cli/test_run.py::test_composite_stops_on_first_failure",
"tests/cli/test_run.py::test_composite_inherit_env",
"tests/cli/test_run.py::test_composite_fail_on_first_missing_task",
"tests/cli/test_run.py::test_composite_runs_all_hooks",
"tests/cli/test_run.py::test_composite_pass_parameters_to_subtasks",
"tests/cli/test_run.py::test_composite_can_pass_parameters",
"tests/cli/test_run.py::test_composite_hooks_inherit_env",
"tests/cli/test_run.py::test_composite_inherit_env_in_cascade",
"tests/cli/test_run.py::test_composite_inherit_dotfile",
"tests/cli/test_run.py::test_composite_can_have_commands",
"tests/cli/test_run.py::test_run_shortcut",
"tests/cli/test_run.py::test_run_shortcuts_dont_override_commands",
"tests/cli/test_run.py::test_run_shortcut_fail_with_usage_if_script_not_found",
"tests/cli/test_run.py::test_empty_positionnal_args_still_display_usage[no",
"tests/cli/test_run.py::test_empty_positionnal_args_still_display_usage[unknown",
"tests/cli/test_run.py::test_empty_positionnal_args_still_display_usage[not"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-25 13:12:54+00:00
|
mit
| 4,494 |
|
pdm-project__pdm-1632
|
diff --git a/news/1619.bugfix.md b/news/1619.bugfix.md
new file mode 100644
index 00000000..eca4a386
--- /dev/null
+++ b/news/1619.bugfix.md
@@ -0,0 +1,1 @@
+Fix the wildcards in requirement specifiers to make it pass the new parser of `packaging>=22`.
diff --git a/src/pdm/models/requirements.py b/src/pdm/models/requirements.py
index 49f0c33a..65e4a0b6 100644
--- a/src/pdm/models/requirements.py
+++ b/src/pdm/models/requirements.py
@@ -26,6 +26,7 @@ from pdm.models.markers import Marker, get_marker, split_marker_extras
from pdm.models.setup import Setup
from pdm.models.specifiers import PySpecSet, get_specifier
from pdm.utils import (
+ PACKAGING_22,
add_ssh_scheme_to_git_uri,
comparable_version,
normalize_name,
@@ -36,6 +37,8 @@ from pdm.utils import (
)
if TYPE_CHECKING:
+ from typing import Match
+
from pdm._types import RequirementDict
@@ -458,6 +461,34 @@ def filter_requirements_with_extras(
return result
+_legacy_specifier_re = re.compile(r"(==|!=|<=|>=|<|>)(\s*)([^,;\s)]*)")
+
+
+def parse_as_pkg_requirement(line: str) -> PackageRequirement:
+ """Parse a requirement line as packaging.requirement.Requirement"""
+
+ def fix_wildcard(match: Match[str]) -> str:
+ operator, _, version = match.groups()
+ if ".*" not in version or operator in ("==", "!="):
+ return match.group(0)
+ version = version.replace(".*", ".0")
+ if operator in ("<", "<="): # <4.* and <=4.* are equivalent to <4.0
+ operator = "<"
+ elif operator in (">", ">="): # >4.* and >=4.* are equivalent to >=4.0
+ operator = ">="
+ return f"{operator}{version}"
+
+ try:
+ return PackageRequirement(line)
+ except InvalidRequirement:
+ if not PACKAGING_22: # We can't do anything, reraise the error.
+ raise
+ # Since packaging 22.0, legacy specifiers like '>=4.*' are no longer
+ # supported. We try to normalize them to the new format.
+ new_line = _legacy_specifier_re.sub(fix_wildcard, line)
+ return PackageRequirement(new_line)
+
+
def parse_requirement(line: str, editable: bool = False) -> Requirement:
m = _vcs_req_re.match(line)
@@ -475,14 +506,14 @@ def parse_requirement(line: str, editable: bool = False) -> Requirement:
if replaced:
line = line.replace("{root:uri}", root_url)
try:
- package_req = PackageRequirement(line) # type: ignore
+ pkg_req = parse_as_pkg_requirement(line)
except InvalidRequirement as e:
m = _file_req_re.match(line)
if m is None:
raise RequirementError(str(e)) from None
r = FileRequirement.create(**m.groupdict())
else:
- r = Requirement.from_pkg_requirement(package_req)
+ r = Requirement.from_pkg_requirement(pkg_req)
if replaced:
assert isinstance(r, FileRequirement)
r.url = r.url.replace(root_url, "{root:uri}")
diff --git a/src/pdm/utils.py b/src/pdm/utils.py
index 50fdd2c6..77187f80 100644
--- a/src/pdm/utils.py
+++ b/src/pdm/utils.py
@@ -23,9 +23,10 @@ from typing import IO, Any, Iterator
from packaging.version import Version
from pdm._types import Source
-from pdm.compat import Distribution
+from pdm.compat import Distribution, importlib_metadata
_egg_fragment_re = re.compile(r"(.*)[#&]egg=[^&]*")
+PACKAGING_22 = Version(importlib_metadata.version("packaging")) >= Version("22")
def create_tracked_tempdir(
|
pdm-project/pdm
|
69fa5b3fbc67f99c90b473cf759b36aa025ef744
|
diff --git a/tests/__init__.py b/tests/__init__.py
index c3b9bc68..bea50964 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,7 +1,3 @@
from pathlib import Path
-import packaging
-from packaging.version import Version
-
FIXTURES = Path(__file__).parent / "fixtures"
-PACKAGING_22 = Version(packaging.__version__) >= Version("22.0")
diff --git a/tests/models/test_requirements.py b/tests/models/test_requirements.py
index c9f707de..d18c1eab 100644
--- a/tests/models/test_requirements.py
+++ b/tests/models/test_requirements.py
@@ -51,6 +51,8 @@ REQUIREMENTS = [
"[email protected]:pypa/pip.git#egg=pip",
"pip @ git+ssh://[email protected]/pypa/pip.git",
),
+ ("foo >=4.*, <=5.*", "foo<5.0,>=4.0"),
+ ("foo (>=4.*, <=5.*)", "foo<5.0,>=4.0"),
]
diff --git a/tests/models/test_specifiers.py b/tests/models/test_specifiers.py
index c611f6d3..362034e3 100644
--- a/tests/models/test_specifiers.py
+++ b/tests/models/test_specifiers.py
@@ -1,7 +1,7 @@
import pytest
from pdm.models.specifiers import PySpecSet
-from tests import PACKAGING_22
+from pdm.utils import PACKAGING_22
@pytest.mark.parametrize(
|
[BUG] Invalid version spec `>=5.*, <=6.*` creates unhandled junk dependency `b''`
- [X] I have searched the issue tracker and believe that this is not a duplicate.
**Make sure you run commands with `-v` flag before pasting the output.**
## Steps to reproduce
`pdm add pyaml-env==1.2.0` with a fresh `pyproject.toml` like the following:
```toml
[project]
name = ""
version = ""
description = ""
authors = [
{name = "Gregory Danielson", email = "[email protected]"},
]
dependencies = []
requires-python = ">=3.6"
license = {text = "MIT"}
[tool.pdm]
```
You can also set up a minimal project with the following `setup.py` and attempt to install it:
```py
from setuptools import setup
REQUIREMENTS = ["PyYAML>=5.*"]
setup(name="project",
version="0.1.0",
install_requires=REQUIREMENTS,
package_dir={"": "src"},
packages=["project"])
```
## Actual behavior
```
pdm.termui: ======== Starting round 1 ========
unearth.preparer: Using cached <Link https://files.pythonhosted.org/packages/d2/6c/fb09236297c1934448966abed6aaa2b2bed9f78992ef0346343e326398f4/pyaml_env-1.2.0-py3-none-any.whl (from https://pypi.org/simple/pyaml-env/)>
pdm.termui: Adding requirement b''(from pyaml-env 1.2.0)
[FileRequirement(name=None, marker=None, extras=None, specifier=None, editable=False, prerelease=False, url=None, path=PosixPath("b''"), subdirectory=None)]
Traceback (most recent call last):
File "/home/gregdan3/.local/bin/pdm", line 8, in <module>
sys.exit(main())
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/core.py", line 259, in main
return Core().main(args)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/core.py", line 191, in main
raise cast(Exception, err).with_traceback(traceback)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/core.py", line 186, in main
f(project, options)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/cli/commands/install.py", line 58, in handle
actions.do_lock(
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/cli/actions.py", line 95, in do_lock
mapping, dependencies = resolve(
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/resolver/core.py", line 35, in resolve
result = resolver.resolve(requirements, max_rounds)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/resolvers.py", line 521, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/resolvers.py", line 402, in resolve
failure_causes = self._attempt_to_pin_criterion(name)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/resolvers.py", line 238, in _attempt_to_pin_criterion
criteria = self._get_updated_criteria(candidate)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/resolvers.py", line 229, in _get_updated_criteria
self._add_to_criteria(criteria, requirement, parent=candidate)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/resolvers.py", line 172, in _add_to_criteria
if not criterion.candidates:
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/structs.py", line 127, in __bool__
next(iter(self))
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/structs.py", line 136, in __iter__
self._factory() if self._iterable is None else self._iterable
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/resolver/providers.py", line 151, in matches_gen
candidates = self._find_candidates(reqs[0])
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/resolver/providers.py", line 127, in _find_candidates
can = make_candidate(requirement)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/models/candidates.py", line 626, in make_candidate
return Candidate(req, name, version, link)
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/models/candidates.py", line 145, in __init__
link = req.as_file_link() # type: ignore
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/models/requirements.py", line 321, in as_file_link
return Link(url)
File "<string>", line 9, in __init__
File "/home/gregdan3/.local/pipx/venvs/pdm/lib/python3.10/site-packages/unearth/link.py", line 54, in __post_init__
if self.url.startswith(vcs_prefixes):
TypeError: a bytes-like object is required, not 'str'
```
## Expected behavior
Version specs such as [those previously in pyaml-env](https://github.com/mkaranasou/pyaml_env/blob/acca18ffbf28ab03cf9aff34ce17db5d09bb2c93/requirements.txt) should be detected as breaking PEP440 rather than assumed to be FileRequirements.
## Environment Information
```bash
# Paste the output of `pdm info && pdm info --env` below:
PDM version:
2.4.0
Python Interpreter:
/usr/bin/python3 (3.10)
Project Root:
/home/gregdan3/example
Project Packages:
/home/gregdan3/example/__pypackages__/3.10
{
"implementation_name": "cpython",
"implementation_version": "3.10.9",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "6.0.12-arch1-1",
"platform_system": "Linux",
"platform_version": "#1 SMP PREEMPT_DYNAMIC Thu, 08 Dec 2022 11:03:38 +0000",
"python_full_version": "3.10.9",
"platform_python_implementation": "CPython",
"python_version": "3.10",
"sys_platform": "linux"
}
```
## Other Notes
`pyaml-env` has already fixed this bug [here](https://github.com/mkaranasou/pyaml_env/issues/30).
The issue results from [this logic](https://github.com/pdm-project/pdm/blob/e1f593963544c725c215134d479211ff7eab4e12/src/pdm/models/requirements.py#L479), where any failed attempt to create a `PackageRequirement` is subsequently parsed as a `FileRequirement`. `_file_req_re` matches the line, producing a FileRequirement with all null fields except for the path; parsing the url produces a `b''` that gets caught in the `results` [here](https://github.com/pdm-project/pdm/blob/e1f593963544c725c215134d479211ff7eab4e12/src/pdm/models/requirements.py#L452).
The `b''` obviously fails the next round of dependency resolution.
Ideally, there would be an independent way to verify the FileRequirement so it could be more than a secondary case of the PackageRequirement.
|
0.0
|
69fa5b3fbc67f99c90b473cf759b36aa025ef744
|
[
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests-None]",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests<2.21.0,>=2.20.0-None]",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests==2.19.0;",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests[security,tests]==2.8.*,>=2.8.1;",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[pip",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[git+http://git.example.com/MyProject.git@master#egg=MyProject-MyProject",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[https://github.com/pypa/pip/archive/1.3.1.zip-None]",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[/root/data/temp_dir/tmpom8cfgaq/pdm-project__pdm__0.0/tests/fixtures/projects/demo-demo",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[/root/data/temp_dir/tmpom8cfgaq/pdm-project__pdm__0.0/tests/fixtures/artifacts/demo-0.0.1-py2.py3-none-any.whl-demo",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[/root/data/temp_dir/tmpom8cfgaq/pdm-project__pdm__0.0/tests/fixtures/projects/demo[security]-demo[security]",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests;",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[[email protected]:pypa/pip.git#egg=pip-pip",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[foo",
"tests/models/test_requirements.py::test_illegal_requirement_line[requests;",
"tests/models/test_requirements.py::test_illegal_requirement_line[./tests-The",
"tests/models/test_requirements.py::test_not_supported_editable_requirement[requests",
"tests/models/test_requirements.py::test_not_supported_editable_requirement[https://github.com/pypa/pip/archive/1.3.1.zip]",
"tests/models/test_specifiers.py::test_normalize_pyspec[>=3.6->=3.6]",
"tests/models/test_specifiers.py::test_normalize_pyspec[<3.8-<3.8]",
"tests/models/test_specifiers.py::test_normalize_pyspec[~=2.7.0->=2.7,<2.8]",
"tests/models/test_specifiers.py::test_normalize_pyspec[-]",
"tests/models/test_specifiers.py::test_normalize_pyspec[>=3.6,<3.8->=3.6,<3.8]",
"tests/models/test_specifiers.py::test_normalize_pyspec[>3.6->=3.6.1]",
"tests/models/test_specifiers.py::test_normalize_pyspec[<=3.7-<3.7.1]",
"tests/models/test_specifiers.py::test_normalize_pyspec[<3.3,!=3.4.*,!=3.5.*-<3.3]",
"tests/models/test_specifiers.py::test_normalize_pyspec[>=3.6,!=3.4.*->=3.6]",
"tests/models/test_specifiers.py::test_normalize_pyspec[>=3.6,!=3.6.*->=3.7]",
"tests/models/test_specifiers.py::test_normalize_pyspec[>=3.6,<3.8,!=3.8.*->=3.6,<3.8]",
"tests/models/test_specifiers.py::test_normalize_pyspec[>=2.7,<3.2,!=3.0.*,!=3.1.*->=2.7,<3.0]",
"tests/models/test_specifiers.py::test_normalize_pyspec[!=3.0.*,!=3.0.2-!=3.0.*]",
"tests/models/test_specifiers.py::test_normalize_pyspec[<3.10.0a6-<3.10.0a6]",
"tests/models/test_specifiers.py::test_normalize_pyspec[<3.10.2a3-<3.10.2a3]",
"tests/models/test_specifiers.py::test_pyspec_and_op[>=3.6->=3.0->=3.6]",
"tests/models/test_specifiers.py::test_pyspec_and_op[>=3.6-<3.8->=3.6,<3.8]",
"tests/models/test_specifiers.py::test_pyspec_and_op[->=3.6->=3.6]",
"tests/models/test_specifiers.py::test_pyspec_and_op[>=3.6-<3.2-impossible]",
"tests/models/test_specifiers.py::test_pyspec_and_op[>=2.7,!=3.0.*-!=3.1.*->=2.7,!=3.0.*,!=3.1.*]",
"tests/models/test_specifiers.py::test_pyspec_and_op[>=3.11.0a2-<3.11.0b->=3.11.0a2,<3.11.0b0]",
"tests/models/test_specifiers.py::test_pyspec_and_op[<3.11.0a2->3.11.0b-impossible]",
"tests/models/test_specifiers.py::test_pyspec_or_op[>=3.6->=3.0->=3.0]",
"tests/models/test_specifiers.py::test_pyspec_or_op[->=3.6-]",
"tests/models/test_specifiers.py::test_pyspec_or_op[>=3.6-<3.7-]",
"tests/models/test_specifiers.py::test_pyspec_or_op[>=3.6,<3.8->=3.4,<3.7->=3.4,<3.8]",
"tests/models/test_specifiers.py::test_pyspec_or_op[~=2.7->=3.6->=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*]",
"tests/models/test_specifiers.py::test_pyspec_or_op[<2.7.15->=3.0-!=2.7.15,!=2.7.16,!=2.7.17,!=2.7.18]",
"tests/models/test_specifiers.py::test_pyspec_or_op[>3.11.0a2->3.11.0b->=3.11.0a3]",
"tests/models/test_specifiers.py::test_impossible_pyspec",
"tests/models/test_specifiers.py::test_pyspec_is_subset_superset[~=2.7->=2.7]",
"tests/models/test_specifiers.py::test_pyspec_is_subset_superset[>=3.6-]",
"tests/models/test_specifiers.py::test_pyspec_is_subset_superset[>=3.7->=3.6,<4.0]",
"tests/models/test_specifiers.py::test_pyspec_is_subset_superset[>=2.7,<3.0->=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*]",
"tests/models/test_specifiers.py::test_pyspec_is_subset_superset[>=3.6->=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*]",
"tests/models/test_specifiers.py::test_pyspec_is_subset_superset[>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*->=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*]",
"tests/models/test_specifiers.py::test_pyspec_isnot_subset_superset[~=2.7->=2.6,<2.7.15]",
"tests/models/test_specifiers.py::test_pyspec_isnot_subset_superset[>=3.7->=3.6,<3.9]",
"tests/models/test_specifiers.py::test_pyspec_isnot_subset_superset[>=3.7,<3.6-==2.7]",
"tests/models/test_specifiers.py::test_pyspec_isnot_subset_superset[>=3.0,!=3.4.*->=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*]",
"tests/models/test_specifiers.py::test_pyspec_isnot_subset_superset[>=3.11.0-<3.11.0a]"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-18 11:01:19+00:00
|
mit
| 4,495 |
|
pdm-project__pdm-1712
|
diff --git a/news/1702.bugfix.md b/news/1702.bugfix.md
new file mode 100644
index 00000000..bf9269c7
--- /dev/null
+++ b/news/1702.bugfix.md
@@ -0,0 +1,1 @@
+Fix a resolution failure when the project has cascading relative path dependencies.
diff --git a/src/pdm/models/repositories.py b/src/pdm/models/repositories.py
index 6ae8308b..52bbf459 100644
--- a/src/pdm/models/repositories.py
+++ b/src/pdm/models/repositories.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import dataclasses
+import posixpath
import sys
from functools import lru_cache, wraps
from typing import TYPE_CHECKING, Any, Callable, Iterable, Mapping, TypeVar, cast
@@ -17,7 +18,13 @@ from pdm.models.requirements import (
)
from pdm.models.search import SearchResultParser
from pdm.models.specifiers import PySpecSet
-from pdm.utils import cd, normalize_name, url_without_fragments
+from pdm.utils import (
+ cd,
+ normalize_name,
+ path_to_url,
+ url_to_path,
+ url_without_fragments,
+)
if TYPE_CHECKING:
from pdm._types import CandidateInfo, SearchResult, Source
@@ -392,8 +399,8 @@ class LockedRepository(BaseRepository):
return {can.req.identify(): can for can in self.packages.values()}
def _read_lockfile(self, lockfile: Mapping[str, Any]) -> None:
- with cd(self.environment.project.root):
- backend = self.environment.project.backend
+ root = self.environment.project.root
+ with cd(root):
for package in lockfile.get("package", []):
version = package.get("version")
if version:
@@ -406,8 +413,8 @@ class LockedRepository(BaseRepository):
}
req = Requirement.from_req_dict(package_name, req_dict)
if req.is_file_or_url and req.path and not req.url: # type: ignore
- req.url = backend.relative_path_to_url( # type: ignore
- req.path.as_posix() # type: ignore
+ req.url = path_to_url( # type: ignore
+ posixpath.join(root, req.path) # type: ignore
)
can = make_candidate(req, name=package_name, version=version)
can_id = self._identify_candidate(can)
@@ -426,10 +433,16 @@ class LockedRepository(BaseRepository):
def _identify_candidate(self, candidate: Candidate) -> tuple:
url = getattr(candidate.req, "url", None)
+ if url is not None:
+ url = url_without_fragments(url)
+ url = self.environment.project.backend.expand_line(url)
+ if url.startswith("file://"):
+ path = posixpath.normpath(url_to_path(url))
+ url = path_to_url(path)
return (
candidate.identify(),
candidate.version if not url else None,
- url_without_fragments(url) if url else None,
+ url,
candidate.req.editable,
)
diff --git a/src/pdm/models/requirements.py b/src/pdm/models/requirements.py
index fb39f60c..77206405 100644
--- a/src/pdm/models/requirements.py
+++ b/src/pdm/models/requirements.py
@@ -5,6 +5,7 @@ import functools
import inspect
import json
import os
+import posixpath
import re
import secrets
import urllib.parse as urlparse
@@ -278,8 +279,15 @@ class FileRequirement(Requirement):
def str_path(self) -> str | None:
if not self.path:
return None
- result = self.path.as_posix()
- if not self.path.is_absolute() and not result.startswith(("./", "../")):
+ if self.path.is_absolute():
+ try:
+ result = self.path.relative_to(Path.cwd()).as_posix()
+ except ValueError:
+ return self.path.as_posix()
+ else:
+ result = self.path.as_posix()
+ result = posixpath.normpath(result)
+ if not result.startswith(("./", "../")):
result = "./" + result
if result.startswith("./../"):
result = result[2:]
diff --git a/src/pdm/resolver/providers.py b/src/pdm/resolver/providers.py
index 77b3301f..3bfae8ef 100644
--- a/src/pdm/resolver/providers.py
+++ b/src/pdm/resolver/providers.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import os
from typing import TYPE_CHECKING, Callable, cast
from packaging.specifiers import InvalidSpecifier, SpecifierSet
@@ -7,7 +8,7 @@ from resolvelib import AbstractProvider
from pdm.models.candidates import Candidate, make_candidate
from pdm.models.repositories import LockedRepository
-from pdm.models.requirements import parse_requirement, strip_extras
+from pdm.models.requirements import FileRequirement, parse_requirement, strip_extras
from pdm.resolver.python import (
PythonCandidate,
PythonRequirement,
@@ -157,18 +158,23 @@ class BaseProvider(AbstractProvider):
return matches_gen
+ def _compare_file_reqs(self, req1: FileRequirement, req2: FileRequirement) -> bool:
+ backend = self.repository.environment.project.backend
+ if req1.path and req2.path:
+ return os.path.normpath(req1.path) == os.path.normpath(req2.path)
+ left = backend.expand_line(url_without_fragments(req1.get_full_url()))
+ right = backend.expand_line(url_without_fragments(req2.get_full_url()))
+ return left == right
+
def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
if isinstance(requirement, PythonRequirement):
return is_python_satisfied_by(requirement, candidate)
elif candidate.identify() in self.overrides:
return True
if not requirement.is_named:
- backend = self.repository.environment.project.backend
- return not candidate.req.is_named and backend.expand_line(
- url_without_fragments(candidate.req.get_full_url()) # type: ignore
- ) == backend.expand_line(
- url_without_fragments(requirement.get_full_url()) # type: ignore
- )
+ if candidate.req.is_named:
+ return False
+ return self._compare_file_reqs(requirement, candidate.req) # type: ignore
version = candidate.version
this_name = self.repository.environment.project.name
if version is None or candidate.name == this_name:
|
pdm-project/pdm
|
72c82f5441fe3b5e30358e9e7c1ea73647b852bf
|
diff --git a/tests/cli/test_install.py b/tests/cli/test_install.py
index d5b82ae7..4fd16d9c 100644
--- a/tests/cli/test_install.py
+++ b/tests/cli/test_install.py
@@ -3,6 +3,7 @@ import pytest
from pdm.cli import actions
from pdm.models.requirements import parse_requirement
from pdm.pytest import Distribution
+from pdm.utils import cd
@pytest.mark.usefixtures("repository")
@@ -206,7 +207,6 @@ def test_sync_with_pure_option(project, working_set, invoke):
assert "django" not in working_set
[email protected]("repository")
def test_install_referencing_self_package(project, working_set, invoke):
project.add_dependencies({"pytz": parse_requirement("pytz")}, to_group="tz")
project.add_dependencies({"urllib3": parse_requirement("urllib3")}, to_group="web")
@@ -216,3 +216,11 @@ def test_install_referencing_self_package(project, working_set, invoke):
invoke(["install", "-Gall"], obj=project, strict=True)
assert "pytz" in working_set
assert "urllib3" in working_set
+
+
+def test_install_monorepo_with_rel_paths(fixture_project, invoke, working_set):
+ project = fixture_project("test-monorepo")
+ with cd(project.root):
+ invoke(["install"], obj=project, strict=True)
+ for package in ("package-a", "package-b", "core"):
+ assert package in working_set
diff --git a/tests/conftest.py b/tests/conftest.py
index 40611276..957b4a26 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -99,6 +99,7 @@ def fixture_project(project_no_init):
source = FIXTURES / "projects" / project_name
copytree(source, project_no_init.root)
project_no_init.pyproject.reload()
+ project_no_init.environment = None
return project_no_init
return func
diff --git a/tests/fixtures/projects/test-monorepo/README.md b/tests/fixtures/projects/test-monorepo/README.md
new file mode 100644
index 00000000..39e237d2
--- /dev/null
+++ b/tests/fixtures/projects/test-monorepo/README.md
@@ -0,0 +1,1 @@
+# pdm_test
diff --git a/tests/fixtures/projects/test-monorepo/core/core.py b/tests/fixtures/projects/test-monorepo/core/core.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/fixtures/projects/test-monorepo/core/pyproject.toml b/tests/fixtures/projects/test-monorepo/core/pyproject.toml
new file mode 100644
index 00000000..7dbe43b5
--- /dev/null
+++ b/tests/fixtures/projects/test-monorepo/core/pyproject.toml
@@ -0,0 +1,10 @@
+[project]
+name = "core"
+version = "0.0.1"
+description = ""
+requires-python = ">= 3.7"
+dependencies = []
+
+[build-system]
+requires = ["pdm-pep517"]
+build-backend = "pdm.pep517.api"
diff --git a/tests/fixtures/projects/test-monorepo/package_a/alice.py b/tests/fixtures/projects/test-monorepo/package_a/alice.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/fixtures/projects/test-monorepo/package_a/pyproject.toml b/tests/fixtures/projects/test-monorepo/package_a/pyproject.toml
new file mode 100644
index 00000000..95cd0d92
--- /dev/null
+++ b/tests/fixtures/projects/test-monorepo/package_a/pyproject.toml
@@ -0,0 +1,12 @@
+[project]
+name = "package_a"
+version = "0.0.1"
+description = ""
+requires-python = ">= 3.7"
+dependencies = [
+ "core @ file:///${PROJECT_ROOT}/../core",
+]
+
+[build-system]
+requires = ["pdm-pep517"]
+build-backend = "pdm.pep517.api"
diff --git a/tests/fixtures/projects/test-monorepo/package_b/bob.py b/tests/fixtures/projects/test-monorepo/package_b/bob.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/fixtures/projects/test-monorepo/package_b/pyproject.toml b/tests/fixtures/projects/test-monorepo/package_b/pyproject.toml
new file mode 100644
index 00000000..6f673c0c
--- /dev/null
+++ b/tests/fixtures/projects/test-monorepo/package_b/pyproject.toml
@@ -0,0 +1,12 @@
+[project]
+name = "package_b"
+version = "0.0.1"
+description = ""
+requires-python = ">= 3.7"
+dependencies = [
+ "core @ file:///${PROJECT_ROOT}/../core",
+]
+
+[build-system]
+requires = ["pdm-pep517"]
+build-backend = "pdm.pep517.api"
diff --git a/tests/fixtures/projects/test-monorepo/pyproject.toml b/tests/fixtures/projects/test-monorepo/pyproject.toml
new file mode 100644
index 00000000..6ffaf01f
--- /dev/null
+++ b/tests/fixtures/projects/test-monorepo/pyproject.toml
@@ -0,0 +1,10 @@
+[project]
+requires-python = ">= 3.7"
+dependencies = [
+ "package_a @ file:///${PROJECT_ROOT}/package_a",
+ "package_b @ file:///${PROJECT_ROOT}/package_b",
+]
+
+[build-system]
+requires = ["pdm-pep517"]
+build-backend = "pdm.pep517.api"
|
Same package dependency with relative url is considered different even though absolute url is the same
- [X] I have searched the issue tracker and believe that this is not a duplicate.
## Context
On a monorepo, if 2 packages (package_a and package_b) have the same dependency (core) using a relative url, it fails to install the dependency
```
dependencies = [
"core @ file:///${PROJECT_ROOT}/../core",
]
```
POC: https://github.com/micaelmalta/pdm_test
## Steps to reproduce
```
git clone [email protected]:micaelmalta/pdm_test.git
pdm venv create
pdm install
```
## Actual behavior
```Lock file does not exist
Updating the lock file...
🔒 Lock failed
Unable to find a resolution for core
because of the following conflicts:
core @ file:///xxx/pdm_test/package_a/../core (from package-a@file:///${PROJECT_ROOT}/package_a)
core @ file:///xxx/pdm_test/package_b/../core (from package-b@file:///${PROJECT_ROOT}/package_b)
To fix this, you could loosen the dependency version constraints in pyproject.toml. See https://pdm.fming.dev/latest/usage/dependency/#solve-the-locking-failure for more details.
See /var/folders/64/plzsy9_538x5rw7vv90m74j40000gp/T/pdm-lock-gl5qasxy.log for detailed debug log.
[ResolutionImpossible]: Unable to find a resolution
Add '-v' to see the detailed traceback
```
## Expected behavior
Core package successfuly install
## Environment Information
```bash
# Paste the output of `pdm info && pdm info --env` below:
PDM version:
2.4.5
Python Interpreter:
/xxx/pdm_test/.venv/bin/python (3.11)
Project Root:
/xxx/pdm_test
Project Packages:
None
{
"implementation_name": "cpython",
"implementation_version": "3.11.1",
"os_name": "posix",
"platform_machine": "arm64",
"platform_release": "21.6.0",
"platform_system": "Darwin",
"platform_version": "Darwin Kernel Version 21.6.0: Thu Sep 29 20:11:33 PDT 2022; root:xnu-8020.240.7~1/RELEASE_ARM64_T8110",
"python_full_version": "3.11.1",
"platform_python_implementation": "CPython",
"python_version": "3.11",
"sys_platform": "darwin"
}
```
|
0.0
|
72c82f5441fe3b5e30358e9e7c1ea73647b852bf
|
[
"tests/cli/test_install.py::test_install_monorepo_with_rel_paths"
] |
[
"tests/cli/test_install.py::test_sync_packages_with_group_all",
"tests/cli/test_install.py::test_sync_packages_with_all_dev",
"tests/cli/test_install.py::test_sync_no_lockfile",
"tests/cli/test_install.py::test_sync_clean_packages",
"tests/cli/test_install.py::test_sync_dry_run",
"tests/cli/test_install.py::test_sync_only_different",
"tests/cli/test_install.py::test_sync_in_sequential_mode",
"tests/cli/test_install.py::test_sync_packages_with_groups",
"tests/cli/test_install.py::test_sync_production_packages[False]",
"tests/cli/test_install.py::test_sync_production_packages[True]",
"tests/cli/test_install.py::test_sync_without_self",
"tests/cli/test_install.py::test_sync_with_index_change",
"tests/cli/test_install.py::test_install_command",
"tests/cli/test_install.py::test_sync_command",
"tests/cli/test_install.py::test_install_with_lockfile",
"tests/cli/test_install.py::test_install_with_dry_run",
"tests/cli/test_install.py::test_install_check",
"tests/cli/test_install.py::test_sync_with_pure_option",
"tests/cli/test_install.py::test_install_referencing_self_package"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-15 08:47:25+00:00
|
mit
| 4,496 |
|
pdm-project__pdm-1766
|
diff --git a/news/1765.bugfix.md b/news/1765.bugfix.md
new file mode 100644
index 00000000..3d58604a
--- /dev/null
+++ b/news/1765.bugfix.md
@@ -0,0 +1,1 @@
+Improve the error message when parsing an invalid requirement string.
diff --git a/src/pdm/models/requirements.py b/src/pdm/models/requirements.py
index 56d75472..e9e23db2 100644
--- a/src/pdm/models/requirements.py
+++ b/src/pdm/models/requirements.py
@@ -473,7 +473,10 @@ def parse_requirement(line: str, editable: bool = False) -> Requirement:
m = _file_req_re.match(line)
if m is None:
raise RequirementError(str(e)) from None
- r = FileRequirement.create(**m.groupdict())
+ args = m.groupdict()
+ if not args["url"] and args["path"] and not os.path.exists(args["path"]):
+ raise RequirementError(str(e)) from None
+ r = FileRequirement.create(**args)
else:
r = Requirement.from_pkg_requirement(pkg_req)
if replaced:
|
pdm-project/pdm
|
ba3c22d6900c99ed2fdcb093b84484957616cdf4
|
diff --git a/tests/models/test_candidates.py b/tests/models/test_candidates.py
index 5ee2373b..a9e58a38 100644
--- a/tests/models/test_candidates.py
+++ b/tests/models/test_candidates.py
@@ -102,6 +102,7 @@ def test_extras_warning(project, recwarn):
assert candidate.version == "0.0.1"
[email protected](reason="packaging 22 no longer supports legacy specifiers")
@pytest.mark.usefixtures("local_finder")
def test_parse_abnormal_specifiers(project):
req = parse_requirement("http://fixtures.test/artifacts/celery-4.4.2-py2.py3-none-any.whl")
diff --git a/tests/models/test_requirements.py b/tests/models/test_requirements.py
index 32b9a96a..a1ecc622 100644
--- a/tests/models/test_requirements.py
+++ b/tests/models/test_requirements.py
@@ -74,8 +74,9 @@ def test_convert_req_dict_to_req_line(req, result):
@pytest.mark.parametrize(
"line,expected",
[
- ("requests; os_name=>'nt'", "Invalid marker:"),
+ ("requests; os_name=>'nt'", None),
("./tests", r"The local path (.+)? is not installable"),
+ ("django>=2<4", None),
],
)
def test_illegal_requirement_line(line, expected):
|
TypeError: a bytes-like object is required, not 'str'
- [X] I have searched the issue tracker and believe that this is not a duplicate.
## Steps to reproduce
pdm add -v model-bakery==1.3.3
## Actual behavior
```
Adding packages to default dependencies: model-bakery==1.3.3
STATUS: Resolving dependencies
pdm.termui: ======== Start resolving requirements ========
pdm.termui: model-bakery==1.3.3
pdm.termui: python>=3.11
pdm.termui: Adding requirement model-bakery==1.3.3
pdm.termui: Adding requirement python>=3.11
pdm.termui: ======== Starting round 0 ========
STATUS: Resolving: new pin python>=3.11
pdm.termui: Pinning: python None
pdm.termui: ======== Ending round 0 ========
pdm.termui: ======== Starting round 1 ========
pdm.termui: Adding requirement b''(from model-bakery 1.3.3)
Traceback (most recent call last):
File "/opt/homebrew/bin/pdm", line 8, in <module>
sys.exit(main())
^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/core.py", line 247, in main
return Core().main(args)
^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/core.py", line 181, in main
raise cast(Exception, err).with_traceback(traceback) from None
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/core.py", line 176, in main
f(project, options)
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/cli/commands/add.py", line 57, in handle
actions.do_add(
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/cli/actions.py", line 271, in do_add
resolved = do_lock(project, strategy, tracked_names, reqs, dry_run=dry_run, hooks=hooks)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/cli/actions.py", line 94, in do_lock
mapping, dependencies = resolve(
^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/resolver/core.py", line 35, in resolve
result = resolver.resolve(requirements, max_rounds)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/resolvelib/resolvers.py", line 521, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/resolvelib/resolvers.py", line 402, in resolve
failure_causes = self._attempt_to_pin_criterion(name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/resolvelib/resolvers.py", line 238, in _attempt_to_pin_criterion
criteria = self._get_updated_criteria(candidate)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/resolvelib/resolvers.py", line 229, in _get_updated_criteria
self._add_to_criteria(criteria, requirement, parent=candidate)
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/resolvelib/resolvers.py", line 172, in _add_to_criteria
if not criterion.candidates:
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/resolvelib/structs.py", line 127, in __bool__
next(iter(self))
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/resolver/providers.py", line 241, in matches_gen
yield from super_find()
^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/resolver/providers.py", line 139, in matches_gen
candidates = self._find_candidates(reqs[0])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/resolver/providers.py", line 118, in _find_candidates
can = make_candidate(requirement)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/models/candidates.py", line 592, in make_candidate
return Candidate(req, name, version, link)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/models/candidates.py", line 147, in __init__
link = req.as_file_link() # type: ignore[attr-defined]
^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/pdm/models/requirements.py", line 318, in as_file_link
return Link(url)
^^^^^^^^^
File "<string>", line 9, in __init__
File "/opt/homebrew/Cellar/pdm/2.4.8/libexec/lib/python3.11/site-packages/unearth/link.py", line 54, in __post_init__
if self.url.startswith(vcs_prefixes):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: a bytes-like object is required, not 'str'
```
## Expected behavior
```
Adding packages to default dependencies: model-bakery
🔒 Lock successful
Changes are written to pdm.lock.
Changes are written to pyproject.toml.
Synchronizing working set with lock file: 4 to add, 0 to update, 0 to remove
✔ Install asgiref 3.6.0 successful
✔ Install model-bakery 1.3.3 successful
✔ Install sqlparse 0.4.3 successful
✔ Install django 4.1.7 successful
🎉 All complete!
```
## Environment Information
```bash
PDM version:
2.4.8
Python Interpreter:
/Users/wgordon/.venv/bin/python (3.11)
Project Root:
/Users/wgordon
Project Packages:
None
{
"implementation_name": "cpython",
"implementation_version": "3.11.2",
"os_name": "posix",
"platform_machine": "arm64",
"platform_release": "22.3.0",
"platform_system": "Darwin",
"platform_version": "Darwin Kernel Version 22.3.0: Mon Jan 30 20:38:37 PST 2023; root:xnu-8792.81.3~2/RELEASE_ARM64_T6000",
"python_full_version": "3.11.2",
"platform_python_implementation": "CPython",
"python_version": "3.11",
"sys_platform": "darwin"
}
```
|
0.0
|
ba3c22d6900c99ed2fdcb093b84484957616cdf4
|
[
"tests/models/test_requirements.py::test_illegal_requirement_line[django>=2<4-None]"
] |
[
"tests/models/test_candidates.py::test_parse_local_directory_metadata[False]",
"tests/models/test_candidates.py::test_parse_local_directory_metadata[True]",
"tests/models/test_candidates.py::test_parse_vcs_metadata[False]",
"tests/models/test_candidates.py::test_parse_vcs_metadata[True]",
"tests/models/test_candidates.py::test_parse_artifact_metadata[/root/data/temp_dir/tmpyt_uyytw/pdm-project__pdm__0.0/tests/fixtures/artifacts/demo-0.0.1.tar.gz]",
"tests/models/test_candidates.py::test_parse_artifact_metadata[/root/data/temp_dir/tmpyt_uyytw/pdm-project__pdm__0.0/tests/fixtures/artifacts/demo-0.0.1-py2.py3-none-any.whl]",
"tests/models/test_candidates.py::test_parse_metadata_with_extras",
"tests/models/test_candidates.py::test_parse_remote_link_metadata",
"tests/models/test_candidates.py::test_extras_warning",
"tests/models/test_candidates.py::test_expand_project_root_in_url[demo",
"tests/models/test_candidates.py::test_expand_project_root_in_url[-e",
"tests/models/test_candidates.py::test_parse_project_file_on_build_error",
"tests/models/test_candidates.py::test_parse_project_file_on_build_error_with_extras",
"tests/models/test_candidates.py::test_parse_project_file_on_build_error_no_dep",
"tests/models/test_candidates.py::test_parse_poetry_project_metadata[False]",
"tests/models/test_candidates.py::test_parse_poetry_project_metadata[True]",
"tests/models/test_candidates.py::test_parse_flit_project_metadata[False]",
"tests/models/test_candidates.py::test_parse_flit_project_metadata[True]",
"tests/models/test_candidates.py::test_sdist_candidate_with_wheel_cache",
"tests/models/test_candidates.py::test_cache_vcs_immutable_revision",
"tests/models/test_candidates.py::test_cache_egg_info_sdist",
"tests/models/test_candidates.py::test_invalidate_incompatible_wheel_link",
"tests/models/test_candidates.py::test_legacy_pep345_tag_link",
"tests/models/test_candidates.py::test_ignore_invalid_py_version",
"tests/models/test_candidates.py::test_find_candidates_from_find_links",
"tests/models/test_candidates.py::test_parse_metadata_from_pep621",
"tests/models/test_candidates.py::test_parse_metadata_with_dynamic_fields",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests-None]",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests<2.21.0,>=2.20.0-None]",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests==2.19.0;",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests[security,tests]==2.8.*,>=2.8.1;",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[pip",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[git+http://git.example.com/MyProject.git@master#egg=MyProject-MyProject",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[https://github.com/pypa/pip/archive/1.3.1.zip-None]",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[/root/data/temp_dir/tmpyt_uyytw/pdm-project__pdm__0.0/tests/fixtures/projects/demo-demo",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[/root/data/temp_dir/tmpyt_uyytw/pdm-project__pdm__0.0/tests/fixtures/artifacts/demo-0.0.1-py2.py3-none-any.whl-demo",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[/root/data/temp_dir/tmpyt_uyytw/pdm-project__pdm__0.0/tests/fixtures/projects/demo[security]-demo[security]",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[requests;",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[[email protected]:pypa/pip.git#egg=pip-pip",
"tests/models/test_requirements.py::test_convert_req_dict_to_req_line[foo",
"tests/models/test_requirements.py::test_illegal_requirement_line[requests;",
"tests/models/test_requirements.py::test_illegal_requirement_line[./tests-The",
"tests/models/test_requirements.py::test_not_supported_editable_requirement[requests",
"tests/models/test_requirements.py::test_not_supported_editable_requirement[https://github.com/pypa/pip/archive/1.3.1.zip]"
] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-13 02:12:31+00:00
|
mit
| 4,497 |
|
pdm-project__pdm-1976
|
diff --git a/news/1970.bugfix.md b/news/1970.bugfix.md
new file mode 100644
index 00000000..df68a998
--- /dev/null
+++ b/news/1970.bugfix.md
@@ -0,0 +1,1 @@
+Guess the project name from VCS url if it is missing when importing from requirements.txt.
diff --git a/src/pdm/formats/requirements.py b/src/pdm/formats/requirements.py
index f7a32e04..c18ebb98 100644
--- a/src/pdm/formats/requirements.py
+++ b/src/pdm/formats/requirements.py
@@ -8,7 +8,7 @@ import urllib.parse
from typing import TYPE_CHECKING, Any, Mapping
from pdm.formats.base import make_array
-from pdm.models.requirements import Requirement, parse_requirement
+from pdm.models.requirements import FileRequirement, Requirement, parse_requirement
if TYPE_CHECKING:
from argparse import Namespace
@@ -53,7 +53,11 @@ class RequirementParser:
if not line.startswith("-"):
# Starts with a requirement, just ignore all per-requirement options
req_string = line.split(" -", 1)[0].strip()
- self.requirements.append(parse_requirement(req_string))
+ req = parse_requirement(req_string)
+ if not req.name:
+ assert isinstance(req, FileRequirement)
+ req.name = req.guess_name()
+ self.requirements.append(req)
return
args, _ = self._parser.parse_known_args(shlex.split(line))
if args.index_url:
diff --git a/src/pdm/models/repositories.py b/src/pdm/models/repositories.py
index 8cd5a582..ded3887e 100644
--- a/src/pdm/models/repositories.py
+++ b/src/pdm/models/repositories.py
@@ -4,7 +4,7 @@ import dataclasses
import posixpath
import sys
from functools import wraps
-from typing import TYPE_CHECKING, Any, Callable, Iterable, Mapping, TypeVar, cast
+from typing import TYPE_CHECKING, TypeVar, cast
from pdm import termui
from pdm.exceptions import CandidateInfoNotFound, CandidateNotFound
@@ -25,11 +25,15 @@ from pdm.utils import (
)
if TYPE_CHECKING:
+ from typing import Any, Callable, Iterable, Mapping
+
from unearth import Link
from pdm._types import CandidateInfo, RepositoryConfig, SearchResult
from pdm.environments import BaseEnvironment
+ CandidateKey = tuple[str, str | None, str | None, bool]
+
ALLOW_ALL_PYTHON = PySpecSet()
T = TypeVar("T", bound="BaseRepository")
@@ -390,9 +394,9 @@ class LockedRepository(BaseRepository):
environment: BaseEnvironment,
) -> None:
super().__init__(sources, environment, ignore_compatibility=False)
- self.packages: dict[tuple, Candidate] = {}
+ self.packages: dict[CandidateKey, Candidate] = {}
self.file_hashes: dict[tuple[str, str], dict[Link, str]] = {}
- self.candidate_info: dict[tuple, CandidateInfo] = {}
+ self.candidate_info: dict[CandidateKey, CandidateInfo] = {}
self._read_lockfile(lockfile)
@property
@@ -428,7 +432,7 @@ class LockedRepository(BaseRepository):
Link(item["url"]): item["hash"] for item in hashes if "url" in item
}
- def _identify_candidate(self, candidate: Candidate) -> tuple:
+ def _identify_candidate(self, candidate: Candidate) -> CandidateKey:
url = getattr(candidate.req, "url", None)
if url is not None:
url = url_without_fragments(url)
@@ -452,17 +456,22 @@ class LockedRepository(BaseRepository):
self._get_dependencies_from_lockfile,
)
- def _matching_keys(self, requirement: Requirement) -> Iterable[tuple]:
+ def _matching_keys(self, requirement: Requirement) -> Iterable[CandidateKey]:
+ from pdm.models.requirements import FileRequirement
+
for key in self.candidate_info:
+ can_req = self.packages[key].req
if requirement.name:
if key[0] != requirement.identify():
continue
- elif key[2] is not None:
- if key[2] != url_without_fragments(getattr(requirement, "url", "")):
- continue
else:
- can_req = self.packages[key].req
- if can_req.path != getattr(requirement, "path", None): # type: ignore[attr-defined]
+ assert isinstance(requirement, FileRequirement)
+ if not isinstance(can_req, FileRequirement):
+ continue
+ if requirement.path and can_req.path:
+ if requirement.path != can_req.path:
+ continue
+ elif key[2] is not None and key[2] != url_without_fragments(requirement.url):
continue
yield key
diff --git a/src/pdm/models/requirements.py b/src/pdm/models/requirements.py
index 9f1f21f6..41b6b4ee 100644
--- a/src/pdm/models/requirements.py
+++ b/src/pdm/models/requirements.py
@@ -258,6 +258,35 @@ class FileRequirement(Requirement):
def _hash_key(self) -> tuple:
return (*super()._hash_key(), self.get_full_url(), self.editable)
+ def guess_name(self) -> str | None:
+ filename = os.path.basename(urlparse.unquote(url_without_fragments(self.url))).rsplit("@", 1)[0]
+ if self.is_vcs:
+ if self.vcs == "git": # type: ignore[attr-defined]
+ name = filename
+ if name.endswith(".git"):
+ name = name[:-4]
+ return name
+ elif self.vcs == "hg": # type: ignore[attr-defined]
+ return filename
+ else: # svn and bzr
+ name, in_branch, _ = filename.rpartition("/branches/")
+ if not in_branch and name.endswith("/trunk"):
+ return name[:-6]
+ return name
+ elif filename.endswith(".whl"):
+ return parse_wheel_filename(filename)[0]
+ else:
+ try:
+ return parse_sdist_filename(filename)[0]
+ except ValueError:
+ match = _egg_info_re.match(filename)
+ # Filename is like `<name>-<version>.tar.gz`, where name will be
+ # extracted and version will be left to be determined from
+ # the metadata.
+ if match:
+ return match.group(1)
+ return None
+
@classmethod
def create(cls: type[T], **kwargs: Any) -> T:
if kwargs.get("path"):
@@ -354,19 +383,7 @@ class FileRequirement(Requirement):
if not self.extras:
self.extras = extras
if not self.name and not self.is_vcs:
- filename = os.path.basename(urlparse.unquote(url_without_fragments(self.url)))
- if filename.endswith(".whl"):
- self.name, *_ = parse_wheel_filename(filename)
- else:
- try:
- self.name, *_ = parse_sdist_filename(filename)
- except ValueError:
- match = _egg_info_re.match(filename)
- # Filename is like `<name>-<version>.tar.gz`, where name will be
- # extracted and version will be left to be determined from
- # the metadata.
- if match:
- self.name = match.group(1)
+ self.name = self.guess_name()
def _check_installable(self) -> None:
assert self.path
|
pdm-project/pdm
|
4cafd2f2f8456c69b4518070ec72d851e0806d5a
|
diff --git a/tests/test_formats.py b/tests/test_formats.py
index 289bc203..85b7c605 100644
--- a/tests/test_formats.py
+++ b/tests/test_formats.py
@@ -52,7 +52,7 @@ def test_convert_requirements_file_without_name(project, vcs):
assert requirements.check_fingerprint(project, str(req_file))
result, _ = requirements.convert(project, str(req_file), Namespace(dev=False, group=None))
- assert result["dependencies"] == ["git+https://github.com/test-root/demo.git"]
+ assert result["dependencies"] == ["demo @ git+https://github.com/test-root/demo.git"]
def test_convert_poetry(project):
|
pdm lock & pdm install on git dependencies
- [x] I have searched the issue tracker and believe that this is not a duplicate.
**Make sure you run commands with `-v` flag before pasting the output.**
## Steps to reproduce
1. Create a clean folder
2. `pdm init -n`
3. `echo git+https://github.com/online-ml/river > requirements.txt`
4. `pdm import requirements.txt`
5. `pdm lock`
6. `pdm install` <-- this fails.
However, `pdm add git+https://github.com/online-ml/river` works so there seems to be something weird with the flow.
## Actual behavior
```❯ pdm install -vv
STATUS: Resolving packages from lockfile...
Traceback (most recent call last):
File "/Users/martinroed/.local/bin/pdm", line 8, in <module>
sys.exit(main())
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/core.py", line 255, in main
return Core().main(args)
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/core.py", line 193, in main
raise cast(Exception, err).with_traceback(traceback) from None
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/core.py", line 188, in main
self.handle(project, options)
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/core.py", line 154, in handle
command.handle(project, options)
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/cli/commands/install.py", line 62, in handle
actions.do_sync(
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/cli/actions.py", line 197, in do_sync
candidates = resolve_candidates_from_lockfile(project, requirements)
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/cli/actions.py", line 140, in resolve_candidates_from_lockfile
mapping, *_ = resolve(
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/resolver/core.py", line 35, in resolve
result = resolver.resolve(requirements, max_rounds)
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/resolvers.py", line 546, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/resolvers.py", line 397, in resolve
self._add_to_criteria(self.state.criteria, r, parent=None)
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/resolvers.py", line 173, in _add_to_criteria
if not criterion.candidates:
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/resolvelib/structs.py", line 127, in __bool__
next(iter(self))
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/resolver/providers.py", line 140, in <genexpr>
return (
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/models/repositories.py", line 472, in find_candidates
for key in self._matching_keys(requirement):
File "/Users/martinroed/.local/pipx/venvs/pdm/lib/python3.10/site-packages/pdm/models/repositories.py", line 456, in _matching_keys
if can_req.path != getattr(requirement, "path", None): # type: ignore[attr-defined]
AttributeError: 'NamedRequirement' object has no attribute 'path'
```
## Expected behavior
Install should work!
## Environment Information
```bash
❯ pdm info && pdm info --env
PDM version:
2.5.2
Python Interpreter:
/Users/martinroed/play/debug-river/.venv/bin/python (3.11)
Project Root:
/Users/martinroed/play/debug-river
Local Packages:
PDM 2.5.2 is installed, while 2.7.0 is available.
Please run `pdm self update` to upgrade.
Run `pdm config check_update false` to disable the check.
{
"implementation_name": "cpython",
"implementation_version": "3.11.3",
"os_name": "posix",
"platform_machine": "arm64",
"platform_release": "22.2.0",
"platform_system": "Darwin",
"platform_version": "Darwin Kernel Version 22.2.0: Fri Nov 11 02:06:26 PST 2022;
root:xnu-8792.61.2~4/RELEASE_ARM64_T8112",
"python_full_version": "3.11.3",
"platform_python_implementation": "CPython",
"python_version": "3.11",
"sys_platform": "darwin"
}
PDM 2.5.2 is installed, while 2.7.0 is available.
Please run `pdm self update` to upgrade.
Run `pdm config check_update false` to disable the check.
```
|
0.0
|
4cafd2f2f8456c69b4518070ec72d851e0806d5a
|
[
"tests/test_formats.py::test_convert_requirements_file_without_name"
] |
[
"tests/test_formats.py::test_convert_pipfile",
"tests/test_formats.py::test_convert_requirements_file[True]",
"tests/test_formats.py::test_convert_requirements_file[False]",
"tests/test_formats.py::test_convert_poetry",
"tests/test_formats.py::test_convert_flit",
"tests/test_formats.py::test_import_requirements_with_group",
"tests/test_formats.py::test_keep_env_vars_in_source",
"tests/test_formats.py::test_export_replace_project_root",
"tests/test_formats.py::test_convert_setup_py_project"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-05 03:26:02+00:00
|
mit
| 4,498 |
|
pdm-project__pdm-2048
|
diff --git a/news/2042.feature.md b/news/2042.feature.md
new file mode 100644
index 00000000..3df0d517
--- /dev/null
+++ b/news/2042.feature.md
@@ -0,0 +1,1 @@
+Add `--project` option to `pdm venv` to support another path as the project root.
diff --git a/src/pdm/cli/commands/venv/__init__.py b/src/pdm/cli/commands/venv/__init__.py
index 0e156d2e..b28e881f 100644
--- a/src/pdm/cli/commands/venv/__init__.py
+++ b/src/pdm/cli/commands/venv/__init__.py
@@ -9,7 +9,7 @@ from pdm.cli.commands.venv.list import ListCommand
from pdm.cli.commands.venv.purge import PurgeCommand
from pdm.cli.commands.venv.remove import RemoveCommand
from pdm.cli.commands.venv.utils import get_venv_with_name
-from pdm.cli.options import Option
+from pdm.cli.options import project_option
from pdm.project import Project
@@ -17,7 +17,7 @@ class Command(BaseCommand):
"""Virtualenv management"""
name = "venv"
- arguments: list[Option] = []
+ arguments = [project_option]
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_mutually_exclusive_group()
|
pdm-project/pdm
|
0912908320efe6524c3b8e07cfda19854f8d3d41
|
diff --git a/tests/cli/test_venv.py b/tests/cli/test_venv.py
index 046bf97f..49139194 100644
--- a/tests/cli/test_venv.py
+++ b/tests/cli/test_venv.py
@@ -50,6 +50,16 @@ def test_venv_create_in_project(pdm, project):
assert "is not empty" in result.stderr
[email protected]("fake_create")
+def test_venv_create_other_location(pdm, project):
+ pdm(["venv", "-p", project.root.as_posix(), "create"], strict=True)
+ venv_path = project.root / ".venv"
+ assert venv_path.exists()
+ result = pdm(["venv", "-p", project.root.as_posix(), "create"])
+ assert result.exit_code == 1
+ assert "is not empty" in result.stderr
+
+
@pytest.mark.usefixtures("fake_create")
def test_venv_show_path(pdm, project):
project.project_config["venv.in_project"] = True
|
Add project path argument while running pdm venv command
## Is your feature request related to a problem? Please describe.
I often find it frustrating when trying to run the pdm venv command without being able to specify the project path. Currently, the command assumes that the project is located in the current working directory, which can be inconvenient in certain scenarios.
## Describe the solution you'd like
I would like to propose adding a project path argument to the pdm venv command. This would allow users to specify the path to their project directory when creating a virtual environment. This feature would provide more flexibility and convenience, especially for projects that are not located in the current working directory.
By adding the project path argument, users can easily create virtual environments for projects located in different directories without having to navigate to each project's directory first. This would streamline the workflow and save time for developers working with multiple projects simultaneously.
Overall, the addition of the project path argument to the pdm venv command would greatly enhance the usability and flexibility of the PDM tool.
|
0.0
|
0912908320efe6524c3b8e07cfda19854f8d3d41
|
[
"tests/cli/test_venv.py::test_venv_create_other_location"
] |
[
"tests/cli/test_venv.py::test_venv_create",
"tests/cli/test_venv.py::test_venv_create_in_project",
"tests/cli/test_venv.py::test_venv_show_path",
"tests/cli/test_venv.py::test_venv_list",
"tests/cli/test_venv.py::test_venv_remove",
"tests/cli/test_venv.py::test_venv_recreate",
"tests/cli/test_venv.py::test_venv_activate[virtualenv]",
"tests/cli/test_venv.py::test_venv_activate[venv]",
"tests/cli/test_venv.py::test_venv_activate_custom_prompt[virtualenv]",
"tests/cli/test_venv.py::test_venv_activate_custom_prompt[venv]",
"tests/cli/test_venv.py::test_venv_activate_project_without_python",
"tests/cli/test_venv.py::test_venv_activate_error",
"tests/cli/test_venv.py::test_venv_auto_create[True]",
"tests/cli/test_venv.py::test_venv_auto_create[False]",
"tests/cli/test_venv.py::test_venv_purge",
"tests/cli/test_venv.py::test_venv_purge_force",
"tests/cli/test_venv.py::test_venv_purge_interactive[virtualenv-none-True]",
"tests/cli/test_venv.py::test_venv_purge_interactive[virtualenv-0-False]",
"tests/cli/test_venv.py::test_venv_purge_interactive[virtualenv-all-False]",
"tests/cli/test_venv.py::test_venv_purge_interactive[venv-none-True]",
"tests/cli/test_venv.py::test_venv_purge_interactive[venv-0-False]",
"tests/cli/test_venv.py::test_venv_purge_interactive[venv-all-False]",
"tests/cli/test_venv.py::test_virtualenv_backend_create[True]",
"tests/cli/test_venv.py::test_virtualenv_backend_create[False]",
"tests/cli/test_venv.py::test_venv_backend_create[True]",
"tests/cli/test_venv.py::test_venv_backend_create[False]",
"tests/cli/test_venv.py::test_conda_backend_create[True]",
"tests/cli/test_venv.py::test_conda_backend_create[False]"
] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-21 14:44:47+00:00
|
mit
| 4,499 |
|
pdm-project__pdm-2188
|
diff --git a/news/2182.bugfix.md b/news/2182.bugfix.md
new file mode 100644
index 00000000..7ef16f94
--- /dev/null
+++ b/news/2182.bugfix.md
@@ -0,0 +1,1 @@
+Makes `comarable_version("1.2.3+local1") == Version("1.2.3")`.
\ No newline at end of file
diff --git a/news/2187.bugfix.md b/news/2187.bugfix.md
new file mode 100644
index 00000000..137b3605
--- /dev/null
+++ b/news/2187.bugfix.md
@@ -0,0 +1,1 @@
+Default behavior for pdm venv activate when shell detection fails.
diff --git a/src/pdm/cli/commands/venv/activate.py b/src/pdm/cli/commands/venv/activate.py
index e1fb176f..3d04c593 100644
--- a/src/pdm/cli/commands/venv/activate.py
+++ b/src/pdm/cli/commands/venv/activate.py
@@ -45,7 +45,10 @@ class ActivateCommand(BaseCommand):
project.core.ui.echo(self.get_activate_command(venv))
def get_activate_command(self, venv: VirtualEnv) -> str: # pragma: no cover
- shell, _ = shellingham.detect_shell()
+ try:
+ shell, _ = shellingham.detect_shell()
+ except shellingham.ShellDetectionFailure:
+ shell = None
if shell == "fish":
command, filename = "source", "activate.fish"
elif shell == "csh":
diff --git a/src/pdm/utils.py b/src/pdm/utils.py
index 72e4a60f..f903a461 100644
--- a/src/pdm/utils.py
+++ b/src/pdm/utils.py
@@ -19,7 +19,7 @@ import warnings
from pathlib import Path
from typing import TYPE_CHECKING
-from packaging.version import Version
+from packaging.version import Version, _cmpkey
from pdm.compat import importlib_metadata
@@ -344,6 +344,17 @@ def comparable_version(version: str) -> Version:
if parsed.local is not None:
# strip the local part
parsed._version = parsed._version._replace(local=None)
+
+ # To make comparable_version("1.2.3+local1") == Version("1.2.3")
+ parsed._key = _cmpkey(
+ parsed._version.epoch,
+ parsed._version.release,
+ parsed._version.pre,
+ parsed._version.post,
+ parsed._version.dev,
+ parsed._version.local,
+ )
+
return parsed
|
pdm-project/pdm
|
79a56a78ec586244a3b3535e3fdaa19d5ac84619
|
diff --git a/tests/cli/test_venv.py b/tests/cli/test_venv.py
index 49139194..5d11a8d7 100644
--- a/tests/cli/test_venv.py
+++ b/tests/cli/test_venv.py
@@ -5,6 +5,7 @@ import sys
from unittest.mock import ANY
import pytest
+import shellingham
from pdm.cli.commands.venv import backends
from pdm.cli.commands.venv.utils import get_venv_prefix
@@ -168,6 +169,26 @@ def test_venv_activate_error(pdm, project):
assert "Can't activate a non-venv Python" in result.stderr
[email protected]("venv_backends")
+def test_venv_activate_no_shell(pdm, mocker, project):
+ project.project_config["venv.in_project"] = False
+ result = pdm(["venv", "create"], obj=project)
+ assert result.exit_code == 0, result.stderr
+ venv_path = re.match(r"Virtualenv (.+) is created successfully", result.output).group(1)
+ key = os.path.basename(venv_path)[len(get_venv_prefix(project)) :]
+
+ mocker.patch("shellingham.detect_shell", side_effect=shellingham.ShellDetectionFailure())
+ result = pdm(["venv", "activate", key], obj=project)
+ assert result.exit_code == 0, result.stderr
+ backend = project.config["venv.backend"]
+
+ if backend == "conda":
+ assert result.output.startswith("conda activate")
+ else:
+ assert result.output.strip("'\"\n").endswith("activate")
+ assert result.output.startswith("source")
+
+
@pytest.mark.usefixtures("fake_create")
@pytest.mark.parametrize("keep_pypackages", [True, False])
def test_venv_auto_create(pdm, mocker, project, keep_pypackages):
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 218a3205..86976e26 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -3,6 +3,7 @@ import sys
import pytest
import tomlkit
+from packaging.version import Version
from pdm import utils
from pdm.cli import utils as cli_utils
@@ -145,3 +146,8 @@ def test_deprecation_warning():
with pytest.raises(FutureWarning):
utils.deprecation_warning("Test warning", raise_since="0.0")
+
+
+def test_comparable_version():
+ assert utils.comparable_version("1.2.3") == Version("1.2.3")
+ assert utils.comparable_version("1.2.3a1+local1") == Version("1.2.3a1")
|
pdm venv activate: ShellDetectionFailure
- [x] I have searched the issue tracker and believe that this is not a duplicate.
**Make sure you run commands with `-v` flag before pasting the output.**
## Steps to reproduce
```zsh
$ pdm venv create
[...]
$ pdm venv activate in-project
[ShellDetectionFailure]:
Add '-v' to see the detailed traceback
```
I am not certain what environment steps are required to get a ShellDetectionFailure. I'm running zsh (v5.4.2) inside VSCode.
## Actual behavior
When shellingham fails to detect a shell, the `pdm venv activate` command fails.
## Expected behavior
`pdm venv activate` should try a plausible default (e.g. `source .venv/bin/activate`) instead of failing. From the Shellingham docs:
> Shellingham raises ShellDetectionFailure if there is no shell to detect, but your application should almost never do this to your user.
## Environment Information
```bash
PDM version:
2.8.2
Python Interpreter:
/home/philip.garrison/.pyenv/versions/3.8.13/bin/python3.8 (3.8)
Project Root:
/home/philip.garrison/workspace/aics/nuc-morph-analysis
Local Packages:
/home/philip.garrison/workspace/aics/nuc-morph-analysis/__pypackages__/3.8
{
"implementation_name": "cpython",
"implementation_version": "3.8.13",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "4.15.0-210-generic",
"platform_system": "Linux",
"platform_version": "#221-Ubuntu SMP Tue Apr 18 08:32:52 UTC 2023",
"python_full_version": "3.8.13",
"platform_python_implementation": "CPython",
"python_version": "3.8",
"sys_platform": "linux"
}
```
|
0.0
|
79a56a78ec586244a3b3535e3fdaa19d5ac84619
|
[
"tests/cli/test_venv.py::test_venv_activate_no_shell[virtualenv]",
"tests/cli/test_venv.py::test_venv_activate_no_shell[venv]",
"tests/test_utils.py::test_comparable_version"
] |
[
"tests/cli/test_venv.py::test_venv_create",
"tests/cli/test_venv.py::test_venv_create_in_project",
"tests/cli/test_venv.py::test_venv_create_other_location",
"tests/cli/test_venv.py::test_venv_show_path",
"tests/cli/test_venv.py::test_venv_list",
"tests/cli/test_venv.py::test_venv_remove",
"tests/cli/test_venv.py::test_venv_recreate",
"tests/cli/test_venv.py::test_venv_activate[virtualenv]",
"tests/cli/test_venv.py::test_venv_activate[venv]",
"tests/cli/test_venv.py::test_venv_activate_custom_prompt[virtualenv]",
"tests/cli/test_venv.py::test_venv_activate_custom_prompt[venv]",
"tests/cli/test_venv.py::test_venv_activate_project_without_python",
"tests/cli/test_venv.py::test_venv_activate_error",
"tests/cli/test_venv.py::test_venv_auto_create[True]",
"tests/cli/test_venv.py::test_venv_auto_create[False]",
"tests/cli/test_venv.py::test_venv_purge",
"tests/cli/test_venv.py::test_venv_purge_force",
"tests/cli/test_venv.py::test_venv_purge_interactive[virtualenv-none-True]",
"tests/cli/test_venv.py::test_venv_purge_interactive[virtualenv-0-False]",
"tests/cli/test_venv.py::test_venv_purge_interactive[virtualenv-all-False]",
"tests/cli/test_venv.py::test_venv_purge_interactive[venv-none-True]",
"tests/cli/test_venv.py::test_venv_purge_interactive[venv-0-False]",
"tests/cli/test_venv.py::test_venv_purge_interactive[venv-all-False]",
"tests/cli/test_venv.py::test_virtualenv_backend_create[True]",
"tests/cli/test_venv.py::test_virtualenv_backend_create[False]",
"tests/cli/test_venv.py::test_venv_backend_create[True]",
"tests/cli/test_venv.py::test_venv_backend_create[False]",
"tests/cli/test_venv.py::test_conda_backend_create[True]",
"tests/cli/test_venv.py::test_conda_backend_create[False]",
"tests/test_utils.py::test_expand_env_vars[test-test]",
"tests/test_utils.py::test_expand_env_vars[-]",
"tests/test_utils.py::test_expand_env_vars[${FOO}-hello]",
"tests/test_utils.py::test_expand_env_vars[$FOO-$FOO]",
"tests/test_utils.py::test_expand_env_vars[${BAR}-${BAR}]",
"tests/test_utils.py::test_expand_env_vars[%FOO%-%FOO%]",
"tests/test_utils.py::test_expand_env_vars[${FOO}_${FOO}-hello_hello]",
"tests/test_utils.py::test_expend_env_vars_in_auth[https://example.org/path?arg=1-https://example.org/path?arg=1]",
"tests/test_utils.py::test_expend_env_vars_in_auth[https://${FOO}@example.org/path?arg=1-https://[email protected]/path?arg=1]",
"tests/test_utils.py::test_expend_env_vars_in_auth[https://${FOO}:${BAR}@example.org/path?arg=1-https://hello:wo%[email protected]/path?arg=1]",
"tests/test_utils.py::test_expend_env_vars_in_auth[https://${FOOBAR}@example.org/path?arg=1-https://%24%7BFOOBAR%[email protected]/path?arg=1]",
"tests/test_utils.py::test_find_python_in_path",
"tests/test_utils.py::test_merge_dictionary",
"tests/test_utils.py::test_dependency_group_selection[args0-golden0]",
"tests/test_utils.py::test_dependency_group_selection[args1-golden1]",
"tests/test_utils.py::test_dependency_group_selection[args2-golden2]",
"tests/test_utils.py::test_dependency_group_selection[args3-golden3]",
"tests/test_utils.py::test_dependency_group_selection[args4-golden4]",
"tests/test_utils.py::test_dependency_group_selection[args5-golden5]",
"tests/test_utils.py::test_dependency_group_selection[args6-golden6]",
"tests/test_utils.py::test_dependency_group_selection[args7-golden7]",
"tests/test_utils.py::test_prod_should_not_be_with_dev",
"tests/test_utils.py::test_deprecation_warning"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-16 21:12:53+00:00
|
mit
| 4,500 |
|
pdm-project__pdm-2263
|
diff --git a/news/2071.doc.md b/news/2071.doc.md
new file mode 100644
index 00000000..20f852c2
--- /dev/null
+++ b/news/2071.doc.md
@@ -0,0 +1,1 @@
+Clarify what `--no-isolated` does.
\ No newline at end of file
diff --git a/news/2261.bugfix.md b/news/2261.bugfix.md
new file mode 100644
index 00000000..53a05c30
--- /dev/null
+++ b/news/2261.bugfix.md
@@ -0,0 +1,1 @@
+Reject the candidate if it contains invalid metadata, to avoid a crash in the process of resolution.
diff --git a/src/pdm/cli/options.py b/src/pdm/cli/options.py
index ec5f5f4c..1f431c08 100644
--- a/src/pdm/cli/options.py
+++ b/src/pdm/cli/options.py
@@ -172,7 +172,13 @@ install_group.add_argument(
install_group.add_argument("--fail-fast", "-x", action="store_true", help="Abort on first installation error")
-@Option("--no-isolation", dest="build_isolation", nargs=0, help="Do not isolate the build in a clean environment")
+@Option(
+ "--no-isolation",
+ dest="build_isolation",
+ nargs=0,
+ help="Disable isolation when building a source distribution that follows PEP 517, "
+ "as in: build dependencies specified by PEP 518 must be already installed if this option is used.",
+)
def no_isolation_option(project: Project, namespace: argparse.Namespace, values: str | Sequence[Any] | None) -> None:
os.environ["PDM_BUILD_ISOLATION"] = "no"
diff --git a/src/pdm/resolver/providers.py b/src/pdm/resolver/providers.py
index 0de102f2..d43b46d6 100644
--- a/src/pdm/resolver/providers.py
+++ b/src/pdm/resolver/providers.py
@@ -5,8 +5,10 @@ import os
from typing import TYPE_CHECKING, Callable, cast
from packaging.specifiers import InvalidSpecifier, SpecifierSet
-from resolvelib import AbstractProvider
+from resolvelib import AbstractProvider, RequirementsConflicted
+from resolvelib.resolvers import Criterion
+from pdm.exceptions import InvalidPyVersion, RequirementError
from pdm.models.candidates import Candidate, make_candidate
from pdm.models.repositories import LockedRepository
from pdm.models.requirements import FileRequirement, parse_requirement, strip_extras
@@ -16,6 +18,7 @@ from pdm.resolver.python import (
find_python_matches,
is_python_satisfied_by,
)
+from pdm.termui import logger
from pdm.utils import is_url, url_without_fragments
if TYPE_CHECKING:
@@ -81,9 +84,15 @@ class BaseProvider(AbstractProvider):
)
dep_depth = min(parent_depths, default=0) + 1
# Use the REAL identifier as it may be updated after candidate preparation.
- candidate = next(candidates[identifier])
+ deps: list[Requirement] = []
+ for candidate in candidates[identifier]:
+ try:
+ deps = self.get_dependencies(candidate)
+ except RequirementsConflicted:
+ continue
+ break
self._known_depth[self.identify(candidate)] = dep_depth
- is_backtrack_cause = any(dep.identify() in backtrack_identifiers for dep in self.get_dependencies(candidate))
+ is_backtrack_cause = any(dep.identify() in backtrack_identifiers for dep in deps)
is_file_or_url = any(not requirement.is_named for requirement, _ in information[identifier])
operators = [
spec.operator for req, _ in information[identifier] if req.specifier is not None for spec in req.specifier
@@ -183,7 +192,13 @@ class BaseProvider(AbstractProvider):
def get_dependencies(self, candidate: Candidate) -> list[Requirement]:
if isinstance(candidate, PythonCandidate):
return []
- deps, requires_python, _ = self.repository.get_dependencies(candidate)
+ try:
+ deps, requires_python, _ = self.repository.get_dependencies(candidate)
+ except (RequirementError, InvalidPyVersion, InvalidSpecifier) as e:
+ # When the metadata is invalid, skip this candidate by marking it as conflicting.
+ # Here we pass an empty criterion so it doesn't provide any info to the resolution.
+ logger.error("Invalid metadata in %s: %s", candidate, e)
+ raise RequirementsConflicted(Criterion([], [], [])) from None
# Filter out incompatible dependencies(e.g. functools32) early so that
# we don't get errors when building wheels.
diff --git a/src/pdm/resolver/reporters.py b/src/pdm/resolver/reporters.py
index 54c5d91a..a696bbfb 100644
--- a/src/pdm/resolver/reporters.py
+++ b/src/pdm/resolver/reporters.py
@@ -72,6 +72,9 @@ class SpinnerReporter(BaseReporter):
logger.info(" Adding requirement %s%s", requirement.as_line(), parent_line)
def rejecting_candidate(self, criterion: Criterion, candidate: Candidate) -> None:
+ if not criterion.information:
+ logger.info("Candidate rejected because it contains invalid metadata: %s", candidate)
+ return
*others, last = criterion.information
logger.info(
"Candidate rejected: %s because it introduces a new requirement %s"
|
pdm-project/pdm
|
67e78942a01449d11d3b0486f1907caeb8caaf1c
|
diff --git a/tests/resolver/test_resolve.py b/tests/resolver/test_resolve.py
index 0b338c7a..1ac8e8f5 100644
--- a/tests/resolver/test_resolve.py
+++ b/tests/resolver/test_resolve.py
@@ -320,3 +320,10 @@ def test_resolve_extra_and_underlying_to_the_same_version(resolve, repository):
repository.add_dependencies("bar", "0.1.0", ["foo[enc]>=0.1.0"])
result = resolve(["foo==0.1.0", "bar"])
assert result["foo"].version == result["foo[enc]"].version == "0.1.0"
+
+
+def test_resolve_skip_candidate_with_invalid_metadata(resolve, repository):
+ repository.add_candidate("sqlparse", "0.4.0")
+ repository.add_dependencies("sqlparse", "0.4.0", ["django>=1.11'"])
+ result = resolve(["sqlparse"], ">=3.6")
+ assert result["sqlparse"].version == "0.3.0"
|
PDM crashes in case of invalid package metadata
- [x] I have searched the issue tracker and believe that this is not a duplicate.
## Steps to reproduce
Not simple but the error is pretty telling
## Actual behavior
PDM (probably actually unearth) crashes if a package has invalid metadata
```
pdm.termui: Adding requirement python>=3.11
pdm.termui: ======== Starting round 0 ========
unearth.preparer: Using cached <Link https://files.pythonhosted.org/packages/f4/72/464966f2c60696bae0eae55270f29883e03581d12aeecddf87ef5ffb6752/sentry_sdk-1.9.1-py2.py3-none-any.whl (from https://pypi.org/simple/sentry-sdk/)>
pdm.termui: Error occurs
Traceback (most recent call last):
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/termui.py", line 239, in logging
yield logger
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/cli/actions.py", line 96, in do_lock
mapping, dependencies = resolve(
^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/resolver/core.py", line 36, in resolve
result = resolver.resolve(requirements, max_rounds)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/resolvelib/resolvers.py", line 546, in resolve
state = resolution.resolve(requirements, max_rounds=max_rounds)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/resolvelib/resolvers.py", line 426, in resolve
name = min(unsatisfied_names, key=self._get_preference)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/resolvelib/resolvers.py", line 203, in _get_preference
return self._p.get_preference(
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/resolver/providers.py", line 86, in get_preference
is_backtrack_cause = any(dep.identify() in backtrack_identifiers for dep in self.get_dependencies(candidate))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/resolver/providers.py", line 186, in get_dependencies
deps, requires_python, _ = self.repository.get_dependencies(candidate)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/models/repositories.py", line 85, in get_dependencies
requirements, requires_python, summary = getter(candidate)
^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/models/repositories.py", line 44, in wrapper
result = func(self, candidate)
^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/models/repositories.py", line 219, in _get_dependencies_from_metadata
deps = prepared.get_dependencies_from_metadata()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/models/candidates.py", line 584, in get_dependencies_from_metadata
return filter_requirements_with_extras(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/models/requirements.py", line 451, in filter_requirements_with_extras
_r = parse_requirement(req)
^^^^^^^^^^^^^^^^^^^^^^
File "/Users/mathieu/.local/pipx/venvs/pdm/lib/python3.11/site-packages/pdm/models/requirements.py", line 503, in parse_requirement
raise RequirementError(str(e)) from None
pdm.exceptions.RequirementError: Expected matching RIGHT_PARENTHESIS for LEFT_PARENTHESIS, after version specifier
urllib3 (>=1.26.11") ; python_version >="3.6"
~~~~~~~~~~^
```
## Expected behavior
Not crash and just discard the invalid package
## Environment Information
```
PDM version:
2.9.2
Python Interpreter:
/Users/mathieu/Library/Application Support/pdm/venvs/myproject-tUhlkEEl-venv3.11/bin/python (3.11)
Project Root:
/Users/mathieu/dev/myproject
Local Packages:
{
"implementation_name": "cpython",
"implementation_version": "3.11.5",
"os_name": "posix",
"platform_machine": "arm64",
"platform_release": "22.6.0",
"platform_system": "Darwin",
"platform_version": "Darwin Kernel Version 22.6.0: Wed Jul 5 22:17:35 PDT 2023; root:xnu-8796.141.3~6/RELEASE_ARM64_T8112",
"python_full_version": "3.11.5",
"platform_python_implementation": "CPython",
"python_version": "3.11",
"sys_platform": "darwin"
}
```
|
0.0
|
67e78942a01449d11d3b0486f1907caeb8caaf1c
|
[
"tests/resolver/test_resolve.py::test_resolve_skip_candidate_with_invalid_metadata"
] |
[
"tests/resolver/test_resolve.py::test_resolve_named_requirement",
"tests/resolver/test_resolve.py::test_resolve_requires_python",
"tests/resolver/test_resolve.py::test_resolve_allow_prereleases",
"tests/resolver/test_resolve.py::test_resolve_with_extras",
"tests/resolver/test_resolve.py::test_resolve_local_artifacts[sdist]",
"tests/resolver/test_resolve.py::test_resolve_local_artifacts[wheel]",
"tests/resolver/test_resolve.py::test_resolve_vcs_and_local_requirements[False-/root/data/temp_dir/tmpfw63d4m4/pdm-project__pdm__0.0/tests/fixtures/projects/demo]",
"tests/resolver/test_resolve.py::test_resolve_vcs_and_local_requirements[False-git+https://github.com/test-root/demo.git#egg=demo]",
"tests/resolver/test_resolve.py::test_resolve_vcs_and_local_requirements[True-/root/data/temp_dir/tmpfw63d4m4/pdm-project__pdm__0.0/tests/fixtures/projects/demo]",
"tests/resolver/test_resolve.py::test_resolve_vcs_and_local_requirements[True-git+https://github.com/test-root/demo.git#egg=demo]",
"tests/resolver/test_resolve.py::test_resolve_vcs_without_explicit_name",
"tests/resolver/test_resolve.py::test_resolve_local_and_named_requirement",
"tests/resolver/test_resolve.py::test_resolving_auto_avoid_conflicts",
"tests/resolver/test_resolve.py::test_resolve_conflicting_dependencies",
"tests/resolver/test_resolve.py::test_resolve_conflicting_dependencies_with_overrides[2.1]",
"tests/resolver/test_resolve.py::test_resolve_conflicting_dependencies_with_overrides[>=1.8]",
"tests/resolver/test_resolve.py::test_resolve_conflicting_dependencies_with_overrides[==2.1]",
"tests/resolver/test_resolve.py::test_resolve_no_available_versions",
"tests/resolver/test_resolve.py::test_exclude_incompatible_requirements",
"tests/resolver/test_resolve.py::test_union_markers_from_different_parents",
"tests/resolver/test_resolve.py::test_requirements_from_different_groups",
"tests/resolver/test_resolve.py::test_resolve_package_with_dummy_upbound",
"tests/resolver/test_resolve.py::test_resolve_dependency_with_extra_marker",
"tests/resolver/test_resolve.py::test_resolve_circular_dependencies",
"tests/resolver/test_resolve.py::test_resolve_candidates_to_install",
"tests/resolver/test_resolve.py::test_resolve_prefer_requirement_with_prereleases",
"tests/resolver/test_resolve.py::test_resolve_with_python_marker",
"tests/resolver/test_resolve.py::test_resolve_file_req_with_prerelease",
"tests/resolver/test_resolve.py::test_resolve_extra_requirements_no_break_constraints",
"tests/resolver/test_resolve.py::test_resolve_extra_and_underlying_to_the_same_version"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-15 02:37:10+00:00
|
mit
| 4,501 |
|
pdm-project__pdm-2311
|
diff --git a/news/2285.bugfix.md b/news/2285.bugfix.md
new file mode 100644
index 00000000..cdb3966e
--- /dev/null
+++ b/news/2285.bugfix.md
@@ -0,0 +1,1 @@
+Fix a bug that dependency groups from Poetry 1.2+ do not migrate properly to PDM.
diff --git a/src/pdm/cli/commands/update.py b/src/pdm/cli/commands/update.py
index a3981917..8799f811 100644
--- a/src/pdm/cli/commands/update.py
+++ b/src/pdm/cli/commands/update.py
@@ -126,6 +126,7 @@ class Command(BaseCommand):
if not packages:
if prerelease:
raise PdmUsageError("--prerelease must be used with packages given")
+ selection.validate()
for group in selection:
updated_deps[group] = all_dependencies[group]
else:
diff --git a/src/pdm/formats/poetry.py b/src/pdm/formats/poetry.py
index 12487530..79a4d615 100644
--- a/src/pdm/formats/poetry.py
+++ b/src/pdm/formats/poetry.py
@@ -143,9 +143,17 @@ class PoetryMetaConverter(MetaConverter):
@convert_from("dev-dependencies")
def dev_dependencies(self, value: dict) -> None:
- self.settings["dev-dependencies"] = {
- "dev": make_array([r for key, req in value.items() for r in _convert_req(key, req)], True),
- }
+ self.settings.setdefault("dev-dependencies", {})["dev"] = make_array(
+ [r for key, req in value.items() for r in _convert_req(key, req)], True
+ )
+ raise Unset()
+
+ @convert_from("group")
+ def group_dependencies(self, value: dict[str, dict[str, Any]]) -> None:
+ for name, group in value.items():
+ self.settings.setdefault("dev-dependencies", {})[name] = make_array(
+ [r for key, req in group.get("dependencies", {}).items() for r in _convert_req(key, req)], True
+ )
raise Unset()
@convert_from()
|
pdm-project/pdm
|
a551bdee1a6da649074cede0e09219dda12e144b
|
diff --git a/tests/fixtures/poetry-new.toml b/tests/fixtures/poetry-new.toml
new file mode 100644
index 00000000..607ee757
--- /dev/null
+++ b/tests/fixtures/poetry-new.toml
@@ -0,0 +1,20 @@
+[tool.poetry]
+name = "test-poetry"
+version = "0.1.0"
+description = ""
+authors = ["Frost Ming <[email protected]>"]
+readme = "README.md"
+packages = [{include = "test_poetry"}]
+
+[tool.poetry.dependencies]
+python = "^3.8"
+httpx = "*"
+pendulum = "*"
+
+[tool.poetry.group.test.dependencies]
+pytest = "^6.0.0"
+pytest-mock = "*"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
diff --git a/tests/test_formats.py b/tests/test_formats.py
index 204ae531..ea4c1728 100644
--- a/tests/test_formats.py
+++ b/tests/test_formats.py
@@ -87,6 +87,15 @@ def test_convert_poetry(project):
assert build["excludes"] == ["my_package/excluded.py"]
+def test_convert_poetry_12(project):
+ golden_file = FIXTURES / "poetry-new.toml"
+ with cd(FIXTURES):
+ result, settings = poetry.convert(project, golden_file, Namespace(dev=False, group=None))
+
+ assert result["dependencies"] == ["httpx", "pendulum"]
+ assert settings["dev-dependencies"]["test"] == ["pytest<7.0.0,>=6.0.0", "pytest-mock"]
+
+
def test_convert_flit(project):
golden_file = FIXTURES / "projects/flit-demo/pyproject.toml"
assert flit.check_fingerprint(project, golden_file)
|
Dependency Groups from Poetry do not migrate properly to PDM
I tried to migrate a project from Poetry that used [dependency groups](https://python-poetry.org/docs/master/managing-dependencies/#dependency-groups) and the groups were not migrated correctly to PDM.
- [x] I have searched the issue tracker and believe that this is not a duplicate.
**Make sure you run commands with `-v` flag before pasting the output.**
## Steps to reproduce
1. Create a new directory with nothing in it
1. Add the following `pyproject.toml`
```toml
[build-system]
build-backend = "poetry.core.masonry.api"
requires = ["poetry-core"]
[tool.poetry]
name = "example-project"
version = "2.0.0"
description = ""
authors = ["Amit Parekh <[email protected]>"]
packages = [{ from = "src", include = "example_project" }]
[tool.poetry.dependencies]
python = "^3.11"
torch = "^2.0.1"
torchdata = "^0.6.1"
orjson = "^3.8.5"
pydantic = "^1.10.4"
rich = "*"
loguru = "*"
typer = "*"
[tool.poetry.group.lint.dependencies]
black = "*"
mypy = "*"
pre-commit = "*"
ruff = "*"
[tool.poetry.group.tests.dependencies]
pytest = "*"
pytest-cases = "*"
pytest-cov = "*"
```
2. Run `pdm init` to migrate the project to PDM
```bash
> pdm init -v
pyproject.toml already exists, update it now.
Please enter the Python interpreter to use
0. /Users/amit/.pyenv/shims/python3 (3.11)
1. /usr/local/bin/python3 (3.11)
2. /Users/amit/.pyenv/versions/3.11.4/bin/python3.11 (3.11)
3. /Users/amit/.local/share/rtx/installs/python/3.11.5/bin/python (3.11)
Please select (0): 3
Would you like to create a virtualenv with /Users/amit/.local/share/rtx/installs/python/3.11.5/bin/python? [y/n] (y):
Cleaning existing target directory /Users/amit/Develop/test-prob/.venv
Run command: ['/Users/amit/.local/share/rtx/installs/pdm/2.9.2/venv/bin/python', '-m', 'virtualenv', '/Users/amit/Develop/test-prob/.venv',
'-p', '/Users/amit/.local/share/rtx/installs/python/3.11.5/bin/python', '--prompt=test-prob-3.11']
created virtual environment CPython3.11.5.final.0-64 in 331ms
creator CPython3Posix(dest=/Users/amit/Develop/test-prob/.venv, clear=False, no_vcs_ignore=False, global=False)
seeder FromAppData(download=False, pip=bundle, setuptools=bundle, wheel=bundle, via=copy, app_data_dir=/Users/amit/Library/Application Support/virtualenv)
added seed packages: pip==23.2.1, setuptools==68.2.0, wheel==0.41.2
activators BashActivator,CShellActivator,FishActivator,NushellActivator,PowerShellActivator,PythonActivator
Virtualenv is created successfully at /Users/amit/Develop/test-prob/.venv
Is the project a library that is installable?
If yes, we will need to ask a few more questions to include the project name and build backend [y/n] (n):
License(SPDX name) (MIT):
Author name (Amit Parekh):
Author email ([email protected]):
Python requires('*' to allow any) (>=3.11):
Project is initialized successfully
Found following files from other formats that you may import:
0. /Users/amit/Develop/test-prob/pyproject.toml (poetry)
1. don't do anything, I will import later.
Please select: 0
Changes are written to pyproject.toml.
```
<!--Describe the minimized example of how to reproduce the bug-->
## Actual behavior
A `pyproject.toml` is updated to be the following
```toml
[tool.poetry]
name = "example-project"
version = "2.0.0"
description = ""
authors = ["Amit Parekh <[email protected]>"]
packages = [{ from = "src", include = "example_project" }]
[tool.poetry.dependencies]
python = "^3.11"
torch = "^2.0.1"
torchdata = "^0.6.1"
orjson = "^3.8.5"
pydantic = "^1.10.4"
rich = "*"
loguru = "*"
typer = "*"
[tool.poetry.group.lint.dependencies]
black = "*"
mypy = "*"
pre-commit = "*"
ruff = "*"
[tool.poetry.group.tests.dependencies]
pytest = "*"
pytest-cases = "*"
pytest-cov = "*"
[tool.pdm.build]
includes = ["src/example_project"]
[project]
name = "example-project"
version = "2.0.0"
description = ""
authors = [
{name = "Amit Parekh", email = "[email protected]"},
]
dependencies = [
"torch<3.0.0,>=2.0.1",
"torchdata<1.0.0,>=0.6.1",
"orjson<4.0.0,>=3.8.5",
"pydantic<2.0.0,>=1.10.4",
"rich",
"loguru",
"typer",
]
requires-python = ">=3.11,<4.0"
readme = "README.md"
license = {text = "MIT"}
[project.group]
[project.group.lint.dependencies]
black = "*"
mypy = "*"
pre-commit = "*"
ruff = "*"
[project.group.tests.dependencies]
pytest = "*"
pytest-cases = "*"
pytest-cov = "*"
[build-system]
requires = ["pdm-backend"]
build-backend = "pdm.backend"
```
<!--A clear and concise description the result of the above steps-->
## Expected behavior
The development dependencies were migrated incorrectly. The `pyproject.toml` should be like this:
```toml
[tool.poetry]
name = "example-project"
version = "2.0.0"
description = ""
authors = ["Amit Parekh <[email protected]>"]
packages = [{ from = "src", include = "example_project" }]
[tool.poetry.dependencies]
python = "^3.11"
torch = "^2.0.1"
torchdata = "^0.6.1"
orjson = "^3.8.5"
pydantic = "^1.10.4"
rich = "*"
loguru = "*"
typer = "*"
[tool.poetry.group.lint.dependencies]
black = "*"
mypy = "*"
pre-commit = "*"
ruff = "*"
[tool.poetry.group.tests.dependencies]
pytest = "*"
pytest-cases = "*"
pytest-cov = "*"
[tool.pdm.build]
includes = ["src/example_project"]
[tool.pdm.dev-dependencies]
test = ["pytest", "pytest-cases", "pytest-cov"]
lint = ["black", "ruff", "mypy", "pre-commit"]
[project]
name = "example-project"
version = "2.0.0"
description = ""
authors = [
{ name = "Amit Parekh", email = "[email protected]" },
]
dependencies = [
"torch<3.0.0,>=2.0.1",
"torchdata<1.0.0,>=0.6.1",
"orjson<4.0.0,>=3.8.5",
"pydantic<2.0.0,>=1.10.4",
"rich",
"loguru",
"typer",
]
requires-python = ">=3.11,<4.0"
readme = "README.md"
license = { text = "MIT" }
[build-system]
requires = ["pdm-backend"]
build-backend = "pdm.backend"
```
<!--A clear and concise description of what you expected to happen.-->
## Environment Information
```bash
# Paste the output of `pdm info && pdm info --env` below:
PDM version:
2.9.2
Python Interpreter:
/Users/amit/Develop/test-prob/.venv/bin/python (3.11)
Project Root:
/Users/amit/Develop/test-prob
Local Packages:
{
"implementation_name": "cpython",
"implementation_version": "3.11.5",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "22.6.0",
"platform_system": "Darwin",
"platform_version": "Darwin Kernel Version 22.6.0: Fri Sep 15 13:39:52 PDT 2023; root:xnu-8796.141.3.700.8~1/RELEASE_X86_64",
"python_full_version": "3.11.5",
"platform_python_implementation": "CPython",
"python_version": "3.11",
"sys_platform": "darwin"
}
```
I haven't run `pdm install` yet after this step.
|
0.0
|
a551bdee1a6da649074cede0e09219dda12e144b
|
[
"tests/test_formats.py::test_convert_poetry_12"
] |
[
"tests/test_formats.py::test_convert_pipfile",
"tests/test_formats.py::test_convert_requirements_file[True]",
"tests/test_formats.py::test_convert_requirements_file[False]",
"tests/test_formats.py::test_convert_requirements_file_without_name",
"tests/test_formats.py::test_convert_poetry",
"tests/test_formats.py::test_convert_flit",
"tests/test_formats.py::test_convert_error_preserve_metadata",
"tests/test_formats.py::test_import_requirements_with_group",
"tests/test_formats.py::test_keep_env_vars_in_source",
"tests/test_formats.py::test_expand_env_vars_in_source",
"tests/test_formats.py::test_export_replace_project_root",
"tests/test_formats.py::test_convert_setup_py_project"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-12 08:12:55+00:00
|
mit
| 4,502 |
|
pdm-project__pdm-2343
|
diff --git a/news/2337.bugfix.md b/news/2337.bugfix.md
new file mode 100644
index 00000000..81360e3c
--- /dev/null
+++ b/news/2337.bugfix.md
@@ -0,0 +1,1 @@
+Fix list index out of range when build error message is empty.
\ No newline at end of file
diff --git a/news/2342.bugfix.md b/news/2342.bugfix.md
new file mode 100644
index 00000000..55d1cc07
--- /dev/null
+++ b/news/2342.bugfix.md
@@ -0,0 +1,1 @@
+Fix find_link sources being exported as `--extra--index-url`
diff --git a/src/pdm/builders/base.py b/src/pdm/builders/base.py
index 2f43f457..f5275e11 100644
--- a/src/pdm/builders/base.py
+++ b/src/pdm/builders/base.py
@@ -73,7 +73,7 @@ def build_error(e: subprocess.CalledProcessError) -> BuildError:
"""
output = cast("list[str]", e.output)
errors: list[str] = []
- if output[-1].strip().startswith("ModuleNotFoundError"):
+ if output and output[-1].strip().startswith("ModuleNotFoundError"):
package = output[-1].strip().split()[-1]
errors.append(
f"Module {package} is missing, please make sure it is specified in the "
diff --git a/src/pdm/formats/requirements.py b/src/pdm/formats/requirements.py
index f9fdf83d..1da87490 100644
--- a/src/pdm/formats/requirements.py
+++ b/src/pdm/formats/requirements.py
@@ -199,7 +199,13 @@ def export(
url = source["url"]
if options.expandvars:
url = expand_env_vars_in_auth(url)
- prefix = "--index-url" if source["name"] == "pypi" else "--extra-index-url"
+ source_type = source.get("type", "index")
+ if source_type == "index":
+ prefix = "--index-url" if source["name"] == "pypi" else "--extra-index-url"
+ elif source_type == "find_links":
+ prefix = "--find-links"
+ else:
+ raise ValueError(f"Unknown source type: {source_type}")
lines.append(f"{prefix} {url}\n")
if not source.get("verify_ssl", True):
host = urllib.parse.urlparse(url).hostname
|
pdm-project/pdm
|
2458ae3fe0b153f53bf512ac5d2819d85f1ed786
|
diff --git a/tests/test_formats.py b/tests/test_formats.py
index ea4c1728..b5deba0c 100644
--- a/tests/test_formats.py
+++ b/tests/test_formats.py
@@ -171,6 +171,13 @@ def test_expand_env_vars_in_source(project, monkeypatch):
assert result.strip().splitlines()[-1] == "--index-url https://foo:[email protected]/simple"
+def test_export_find_links(project, monkeypatch):
+ url = "https://storage.googleapis.com/jax-releases/jax_cuda_releases.html"
+ project.pyproject.settings["source"] = [{"url": url, "name": "jax", "type": "find_links"}]
+ result = requirements.export(project, [], Namespace(expandvars=False))
+ assert result.strip().splitlines()[-1] == f"--find-links {url}"
+
+
def test_export_replace_project_root(project):
artifact = FIXTURES / "artifacts/first-2.0.2-py2.py3-none-any.whl"
shutil.copy2(artifact, project.root)
|
pdm export requirements does not use --find-links
- [x] I have searched the issue tracker and believe that this is not a duplicate.
## Steps to reproduce
<!--Describe the minimized example of how to reproduce the bug-->
I would like to export a requirements.txt file for installation. In my pyproject.toml I have
```toml
[project]
dependencies = ['jax[cuda12_pip']
[[tool.pdm.source]]
name = "jax_cuda"
url = "https://storage.googleapis.com/jax-releases/jax_cuda_releases.html"
verify_ssl = true
type = "find_links"
```
However when I export requirements file, these are exposed as `--extra-index-url` as opposed to --find-links.
## Actual behavior
At the end of my exported requirements I get
```
--extra-index-url https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
```
## Expected behavior
I expect the find links in sources to be preserved during export. The above should be
```
--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
```
## Environment Information
```bash
# Paste the output of `pdm info && pdm info --env` below:
PDM version:
2.10.0
Python Interpreter:
/home/yicheng/project/.venv/bin/python (3.10)
Project Root:
/home/yicheng/project/
Local Packages:
{
"implementation_name": "cpython",
"implementation_version": "3.10.5",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "5.15.0-87-generic",
"platform_system": "Linux",
"platform_version": "#97~20.04.1-Ubuntu SMP Thu Oct 5 08:25:28 UTC 2023",
"python_full_version": "3.10.5",
"platform_python_implementation": "CPython",
"python_version": "3.10",
"sys_platform": "linux"
}
```
|
0.0
|
2458ae3fe0b153f53bf512ac5d2819d85f1ed786
|
[
"tests/test_formats.py::test_export_find_links"
] |
[
"tests/test_formats.py::test_convert_pipfile",
"tests/test_formats.py::test_convert_requirements_file[True]",
"tests/test_formats.py::test_convert_requirements_file[False]",
"tests/test_formats.py::test_convert_requirements_file_without_name",
"tests/test_formats.py::test_convert_poetry",
"tests/test_formats.py::test_convert_poetry_12",
"tests/test_formats.py::test_convert_flit",
"tests/test_formats.py::test_convert_error_preserve_metadata",
"tests/test_formats.py::test_import_requirements_with_group",
"tests/test_formats.py::test_keep_env_vars_in_source",
"tests/test_formats.py::test_expand_env_vars_in_source",
"tests/test_formats.py::test_export_replace_project_root",
"tests/test_formats.py::test_convert_setup_py_project"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-28 02:05:00+00:00
|
mit
| 4,503 |
|
pdm-project__pdm-2371
|
diff --git a/news/2273.enhancement.md b/news/2273.enhancement.md
deleted file mode 100644
index c740a83f..00000000
--- a/news/2273.enhancement.md
+++ /dev/null
@@ -1,2 +0,0 @@
-Add test coverage entrypoint in `pyproject.toml`
-Improve test coverage for untested functions in `pdm.utils`
\ No newline at end of file
diff --git a/news/2286.refactor.md b/news/2286.refactor.md
deleted file mode 100644
index affeceec..00000000
--- a/news/2286.refactor.md
+++ /dev/null
@@ -1,1 +0,0 @@
-Refactor `find_project_root()` and `find_pypackage()` to look for the project root recursively, and remove the `project_max_depth` configuration variable and the corresponding `PDM_PROJECT_MAX_DEPTH` environment variable.
diff --git a/news/2369.bugfix.md b/news/2369.bugfix.md
new file mode 100644
index 00000000..e903cda3
--- /dev/null
+++ b/news/2369.bugfix.md
@@ -0,0 +1,1 @@
+Fix a resolution issue that extra dependencies are not resolved when the bare dependency has more specific version constraint.
diff --git a/src/pdm/formats/requirements.py b/src/pdm/formats/requirements.py
index 1da87490..6cdfc89b 100644
--- a/src/pdm/formats/requirements.py
+++ b/src/pdm/formats/requirements.py
@@ -183,13 +183,18 @@ def export(
from pdm.models.candidates import Candidate
lines = ["# This file is @generated by PDM.\n# Please do not edit it manually.\n\n"]
+ collected_req: set[str] = set()
for candidate in sorted(candidates, key=lambda x: x.identify()): # type: ignore[attr-defined]
if isinstance(candidate, Candidate):
req = dataclasses.replace(candidate.req, specifier=get_specifier(f"=={candidate.version}"), marker=None)
else:
assert isinstance(candidate, Requirement)
req = candidate
+ line = project.backend.expand_line(req.as_line(), options.expandvars)
+ if line in collected_req:
+ continue
lines.append(project.backend.expand_line(req.as_line(), options.expandvars))
+ collected_req.add(line)
if options.hashes and getattr(candidate, "hashes", None):
for item in sorted({row["hash"] for row in candidate.hashes}): # type: ignore[attr-defined]
lines.append(f" \\\n --hash={item}")
diff --git a/src/pdm/models/candidates.py b/src/pdm/models/candidates.py
index bd7213bb..434df1ac 100644
--- a/src/pdm/models/candidates.py
+++ b/src/pdm/models/candidates.py
@@ -175,6 +175,16 @@ class Candidate:
def identify(self) -> str:
return self.req.identify()
+ def copy_with(self, requirement: Requirement) -> Candidate:
+ can = Candidate(requirement, name=self.name, version=self.version, link=self.link)
+ can.summary = self.summary
+ can.hashes = self.hashes
+ can._requires_python = self._requires_python
+ can._prepared = self._prepared
+ if can._prepared:
+ can._prepared.req = requirement
+ return can
+
@property
def dep_key(self) -> tuple[str, str | None]:
"""Key for retrieving and storing dependencies from the provider.
diff --git a/src/pdm/models/repositories.py b/src/pdm/models/repositories.py
index bb0c6e88..6fd07fbc 100644
--- a/src/pdm/models/repositories.py
+++ b/src/pdm/models/repositories.py
@@ -601,8 +601,7 @@ class LockedRepository(BaseRepository):
if not requirement.name:
# make sure can.identify() won't return a randomly-generated name
requirement.name = can.name
- can.req = requirement
- yield can
+ yield can.copy_with(requirement)
def get_hashes(self, candidate: Candidate) -> list[FileHash]:
return candidate.hashes
diff --git a/src/pdm/resolver/providers.py b/src/pdm/resolver/providers.py
index 757e87b1..4d3345c8 100644
--- a/src/pdm/resolver/providers.py
+++ b/src/pdm/resolver/providers.py
@@ -1,6 +1,5 @@
from __future__ import annotations
-import itertools
import os
from typing import TYPE_CHECKING, Callable, cast
@@ -159,15 +158,21 @@ class BaseProvider(AbstractProvider):
return (c for c in candidates if c not in incompat)
elif identifier in self.overrides:
return iter(self.get_override_candidates(identifier))
- reqs_iter = requirements[identifier]
+ reqs = sorted(requirements[identifier], key=self.requirement_preference)
+ original_req = reqs[0]
bare_name, extras = strip_extras(identifier)
if extras and bare_name in requirements:
# We should consider the requirements for both foo and foo[extra]
- reqs_iter = itertools.chain(reqs_iter, requirements[bare_name])
- reqs = sorted(reqs_iter, key=self.requirement_preference)
+ reqs.extend(requirements[bare_name])
+ reqs.sort(key=self.requirement_preference)
candidates = self._find_candidates(reqs[0])
return (
- can for can in candidates if can not in incompat and all(self.is_satisfied_by(r, can) for r in reqs)
+ # In some cases we will use candidates from the bare requirement,
+ # this will miss the extra dependencies if any. So we associate the original
+ # requirement back with the candidate since it is used by `get_dependencies()`.
+ can.copy_with(original_req) if extras else can
+ for can in candidates
+ if can not in incompat and all(self.is_satisfied_by(r, can) for r in reqs)
)
return matches_gen
|
pdm-project/pdm
|
19db6f5056711b6c9701a1bee559c6877dc3f76b
|
diff --git a/tests/cli/test_install.py b/tests/cli/test_install.py
index 853b0659..26f5cea7 100644
--- a/tests/cli/test_install.py
+++ b/tests/cli/test_install.py
@@ -287,3 +287,11 @@ def test_install_groups_and_lock(project, pdm, working_set):
assert project.lockfile.groups == ["tz"]
assert "pytz" in project.locked_repository.all_candidates
assert "urllib3" not in project.locked_repository.all_candidates
+
+
+def test_install_requirement_with_extras(project, pdm, working_set):
+ project.add_dependencies({"requests": parse_requirement("requests==2.19.1")})
+ project.add_dependencies({"requests[socks]": parse_requirement("requests[socks]")}, to_group="socks")
+ pdm(["lock", "-Gsocks"], obj=project, strict=True)
+ pdm(["sync", "-Gsocks"], obj=project, strict=True)
+ assert "pysocks" in working_set
|
Incorrect resolution for JAX
- [x] I have searched the issue tracker and believe that this is not a duplicate.
I am trying to specify JAX as a dependency for my project, but currently it is not resolved correctly by PDM.
For some additional context. JAX depends on a native dependency `jaxlib` for correct operation. This requires the user to specify the extra (e.g., jax[cpu], jax[cuda]) to install the correct jaxlib _variant_, where the CPU variant uses a version and the GPU wheels use a local version identifier to differentiate from the CPU one (and the user needs to use JAX's find link).
## Steps to reproduce
The following pyproject resolves incorrectly
```toml
[project]
name = "pdm-example"
version = "0.1.0"
description = ""
authors = [
{name = "Yicheng Luo", email = "[email protected]"},
]
dependencies = [
"jax==0.4.17",
]
requires-python = ">=3.10,<3.11"
readme = "README.md"
license = {text = "MIT"}
[project.optional-dependencies]
cpu = ["jax[cpu]"]
cuda = ["jax[cuda12_pip]"]
[build-system]
requires = ["pdm-backend"]
build-backend = "pdm.backend"
# [[tool.pdm.source]]
# name = "jax_cuda"
# url = "https://storage.googleapis.com/jax-releases/jax_cuda_releases.html"
# verify_ssl = true
# type = "find_links"
[tool.pdm.resolution]
respect-source-order = true
```
If I run ` pdm lock -G cpu` and the lockfile looks like
```
# This file is @generated by PDM.
# It is not intended for manual editing.
[metadata]
groups = ["default", "cpu"]
strategy = ["cross_platform"]
lock_version = "4.4"
content_hash = "sha256:c7c792eac140bf630ef85a5d708263e9598ace33bba32675d6aade1ac6a881b4"
[[package]]
name = "jax"
version = "0.4.17"
requires_python = ">=3.9"
summary = "Differentiate, compile, and transform Numpy code."
dependencies = [
"ml-dtypes>=0.2.0",
"numpy>=1.22",
"opt-einsum",
"scipy>=1.7",
]
files = [
{file = "jax-0.4.17-py3-none-any.whl", hash = "sha256:c3ab72ea2f1c5d8ccf2561e79f6562fb2964629f3e55b3ac1c11c48b64c20336"},
{file = "jax-0.4.17.tar.gz", hash = "sha256:d7508a69e87835f534cb07a2f21d79cc1cb8c4cfdcf7fb010927267ef7355f1d"},
]
[[package]]
name = "jax"
version = "0.4.17"
extras = ["cpu"]
requires_python = ">=3.9"
summary = "Differentiate, compile, and transform Numpy code."
dependencies = [
"jax==0.4.17",
"jaxlib==0.4.17",
]
files = [
{file = "jax-0.4.17-py3-none-any.whl", hash = "sha256:c3ab72ea2f1c5d8ccf2561e79f6562fb2964629f3e55b3ac1c11c48b64c20336"},
{file = "jax-0.4.17.tar.gz", hash = "sha256:d7508a69e87835f534cb07a2f21d79cc1cb8c4cfdcf7fb010927267ef7355f1d"},
]
[[package]]
name = "jaxlib"
version = "0.4.17"
requires_python = ">=3.9"
summary = "XLA library for JAX"
dependencies = [
"ml-dtypes>=0.2.0",
"numpy>=1.22",
"scipy>=1.7",
]
files = [
{file = "jaxlib-0.4.17-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:d4be1ac4bf1be1ae1cd8f5f4da414a6d0de8de36cf2effdb5758d4d677896078"},
{file = "jaxlib-0.4.17-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:392c779f902c43e1a0af49159daffef9b5af952aba001463f98cf95a59ef17ff"},
{file = "jaxlib-0.4.17-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:160fce68b82a79a6c522652e8dd9a10aac9c00d1599cb7e166671ad909aa139e"},
{file = "jaxlib-0.4.17-cp310-cp310-win_amd64.whl", hash = "sha256:61b3788c6cfe46f307e6e67d4a942de72cf34711ff349f4f11500cdf6dc67199"},
]
[[package]]
name = "ml-dtypes"
version = "0.3.1"
requires_python = ">=3.9"
summary = ""
dependencies = [
"numpy>1.20",
"numpy>=1.21.2; python_version > \"3.9\"",
]
files = [
{file = "ml_dtypes-0.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:510d249a91face47211762eb294d6fe64f325356b965fb6388c1bf51bd339267"},
{file = "ml_dtypes-0.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f83ff080df8910c0f987f615b03e4f8198638e0c00c6e679ea8892dda909763b"},
{file = "ml_dtypes-0.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcae2c69715410d96906e1dfe8f017d9f78a0d10e0df91aae52e91f51fdfe45e"},
{file = "ml_dtypes-0.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:da274599e4950a9b488d21571061f49a185537cc77f2d3f8121151d58a9e9f16"},
{file = "ml_dtypes-0.3.1.tar.gz", hash = "sha256:60778f99194b4c4f36ba42da200b35ef851ce4d4af698aaf70f5b91fe70fc611"},
]
[[package]]
name = "numpy"
version = "1.26.1"
requires_python = "<3.13,>=3.9"
summary = "Fundamental package for array computing in Python"
files = [
{file = "numpy-1.26.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82e871307a6331b5f09efda3c22e03c095d957f04bf6bc1804f30048d0e5e7af"},
{file = "numpy-1.26.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdd9ec98f0063d93baeb01aad472a1a0840dee302842a2746a7a8e92968f9575"},
{file = "numpy-1.26.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d78f269e0c4fd365fc2992c00353e4530d274ba68f15e968d8bc3c69ce5f5244"},
{file = "numpy-1.26.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ab9163ca8aeb7fd32fe93866490654d2f7dda4e61bc6297bf72ce07fdc02f67"},
{file = "numpy-1.26.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:78ca54b2f9daffa5f323f34cdf21e1d9779a54073f0018a3094ab907938331a2"},
{file = "numpy-1.26.1-cp310-cp310-win32.whl", hash = "sha256:d1cfc92db6af1fd37a7bb58e55c8383b4aa1ba23d012bdbba26b4bcca45ac297"},
{file = "numpy-1.26.1-cp310-cp310-win_amd64.whl", hash = "sha256:d2984cb6caaf05294b8466966627e80bf6c7afd273279077679cb010acb0e5ab"},
{file = "numpy-1.26.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:06934e1a22c54636a059215d6da99e23286424f316fddd979f5071093b648668"},
{file = "numpy-1.26.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76ff661a867d9272cd2a99eed002470f46dbe0943a5ffd140f49be84f68ffc42"},
{file = "numpy-1.26.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6965888d65d2848e8768824ca8288db0a81263c1efccec881cb35a0d805fcd2f"},
{file = "numpy-1.26.1.tar.gz", hash = "sha256:c8c6c72d4a9f831f328efb1312642a1cafafaa88981d9ab76368d50d07d93cbe"},
]
[[package]]
name = "opt-einsum"
version = "3.3.0"
requires_python = ">=3.5"
summary = "Optimizing numpys einsum function"
dependencies = [
"numpy>=1.7",
]
files = [
{file = "opt_einsum-3.3.0-py3-none-any.whl", hash = "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147"},
{file = "opt_einsum-3.3.0.tar.gz", hash = "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"},
]
[[package]]
name = "scipy"
version = "1.11.3"
requires_python = "<3.13,>=3.9"
summary = "Fundamental algorithms for scientific computing in Python"
dependencies = [
"numpy<1.28.0,>=1.21.6",
]
files = [
{file = "scipy-1.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:370f569c57e1d888304052c18e58f4a927338eafdaef78613c685ca2ea0d1fa0"},
{file = "scipy-1.11.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9885e3e4f13b2bd44aaf2a1a6390a11add9f48d5295f7a592393ceb8991577a3"},
{file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e04aa19acc324a1a076abb4035dabe9b64badb19f76ad9c798bde39d41025cdc"},
{file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1a8a4657673bfae1e05e1e1d6e94b0cabe5ed0c7c144c8aa7b7dbb774ce5c1"},
{file = "scipy-1.11.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7abda0e62ef00cde826d441485e2e32fe737bdddee3324e35c0e01dee65e2a88"},
{file = "scipy-1.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:033c3fd95d55012dd1148b201b72ae854d5086d25e7c316ec9850de4fe776929"},
{file = "scipy-1.11.3.tar.gz", hash = "sha256:bba4d955f54edd61899776bad459bf7326e14b9fa1c552181f0479cc60a568cd"},
]
```
For `pdm export -G cpu` I get
```
# This file is @generated by PDM.
# Please do not edit it manually.
jax==0.4.17
jax==0.4.17
ml-dtypes==0.3.1
numpy==1.26.1
opt-einsum==3.3.0
scipy==1.11.3
```
## Actual behavior
There are a few issues with the resolution above
1. Duplicate entries of JAX. I saw duplicate entries in the exported requirements.
2. When I selectively export the CPU group, jax should include jaxlib as a dependency, but it is currently missing from the exported requirements. This also causes problems with `pdm install` as jaxlib is also not installed.
This is not expected.
## Expected behavior
I expect that when using the CPU group to install dependencies, jaxlib should be included as a dependnecy.
## Environment Information
```bash
# Paste the output of `pdm info && pdm info --env` below:
PDM version:
2.10.0
Python Interpreter:
/workspace/.venv/bin/python (3.10)
Project Root:
/workspace
Local Packages:
{
"implementation_name": "cpython",
"implementation_version": "3.10.13",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "5.15.0-87-generic",
"platform_system": "Linux",
"platform_version": "#97~20.04.1-Ubuntu SMP Thu Oct 5 08:25:28 UTC 2023",
"python_full_version": "3.10.13",
"platform_python_implementation": "CPython",
"python_version": "3.10",
"sys_platform": "linux"
}
```
## Updates
I tried a few things, it seems that if you also pin the version in extra then things work, but I find that to be surprising.
|
0.0
|
19db6f5056711b6c9701a1bee559c6877dc3f76b
|
[
"tests/cli/test_install.py::test_install_requirement_with_extras"
] |
[
"tests/cli/test_install.py::test_sync_packages_with_group_all",
"tests/cli/test_install.py::test_sync_packages_with_all_dev",
"tests/cli/test_install.py::test_sync_no_lockfile",
"tests/cli/test_install.py::test_sync_clean_packages",
"tests/cli/test_install.py::test_sync_dry_run",
"tests/cli/test_install.py::test_sync_only_different",
"tests/cli/test_install.py::test_sync_in_sequential_mode",
"tests/cli/test_install.py::test_sync_packages_with_groups",
"tests/cli/test_install.py::test_sync_production_packages[prod_option0]",
"tests/cli/test_install.py::test_sync_production_packages[prod_option1]",
"tests/cli/test_install.py::test_sync_without_self",
"tests/cli/test_install.py::test_install_command",
"tests/cli/test_install.py::test_sync_command",
"tests/cli/test_install.py::test_install_with_lockfile",
"tests/cli/test_install.py::test_install_with_dry_run",
"tests/cli/test_install.py::test_install_no_lock",
"tests/cli/test_install.py::test_install_check",
"tests/cli/test_install.py::test_sync_with_only_keep_option",
"tests/cli/test_install.py::test_install_referencing_self_package",
"tests/cli/test_install.py::test_install_monorepo_with_rel_paths",
"tests/cli/test_install.py::test_install_retry",
"tests/cli/test_install.py::test_install_fail_fast",
"tests/cli/test_install.py::test_install_groups_not_in_lockfile",
"tests/cli/test_install.py::test_install_locked_groups",
"tests/cli/test_install.py::test_install_groups_and_lock"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-06 03:43:48+00:00
|
mit
| 4,504 |
|
pdm-project__pdm-2694
|
diff --git a/docs/docs/usage/scripts.md b/docs/docs/usage/scripts.md
index 483d7c1b..12c97c66 100644
--- a/docs/docs/usage/scripts.md
+++ b/docs/docs/usage/scripts.md
@@ -108,6 +108,8 @@ all = {composite = ["lint", "test"]}
Running `pdm run all` will run `lint` first and then `test` if `lint` succeeded.
++++ 2.13.0
+
To override the default behavior and continue the execution of the remaining
scripts after a failure, set the `keep_going` option to `true`:
@@ -179,6 +181,20 @@ start.env_file.override = ".env"
!!! note
A dotenv file specified on a composite task level will override those defined by called tasks.
+### `working_dir`
+
++++ 2.13.0
+
+You can set the current working directory for the script:
+
+```toml
+[tool.pdm.scripts]
+start.cmd = "flask run -p 54321"
+start.working_dir = "subdir"
+```
+
+Relative paths are resolved against the project root.
+
### `site_packages`
To make sure the running environment is properly isolated from the outer Python interpreter,
diff --git a/news/2620.feature.md b/news/2620.feature.md
new file mode 100644
index 00000000..e3ad4ffb
--- /dev/null
+++ b/news/2620.feature.md
@@ -0,0 +1,1 @@
+Add an option `working_dir` for PDM's scripts to set the current working directory.
diff --git a/src/pdm/cli/commands/run.py b/src/pdm/cli/commands/run.py
index 36f7ed64..506072eb 100644
--- a/src/pdm/cli/commands/run.py
+++ b/src/pdm/cli/commands/run.py
@@ -36,11 +36,12 @@ if TYPE_CHECKING:
help: str
keep_going: bool
site_packages: bool
+ working_dir: str
def exec_opts(*options: TaskOptions | None) -> dict[str, Any]:
return dict(
- env={k: v for opts in options if opts for k, v in opts.get("env", {}).items()},
+ env={k: v for opts in options if opts for k, v in opts.get("env", {}).items()} or None,
**{k: v for opts in options if opts for k, v in opts.items() if k not in ("env", "help")},
)
@@ -104,7 +105,7 @@ class TaskRunner:
"""The task runner for pdm project"""
TYPES = ("cmd", "shell", "call", "composite")
- OPTIONS = ("env", "env_file", "help", "keep_going", "site_packages")
+ OPTIONS = ("env", "env_file", "help", "keep_going", "working_dir", "site_packages")
def __init__(self, project: Project, hooks: HookManager) -> None:
self.project = project
@@ -159,6 +160,7 @@ class TaskRunner:
site_packages: bool = False,
env: Mapping[str, str] | None = None,
env_file: EnvFileOptions | str | None = None,
+ working_dir: str | None = None,
) -> int:
"""Run command in a subprocess and return the exit code."""
project = self.project
@@ -213,7 +215,7 @@ class TaskRunner:
# Don't load system site-packages
process_env["NO_SITE_PACKAGES"] = "1"
- cwd = project.root if chdir else None
+ cwd = (project.root / working_dir) if working_dir else project.root if chdir else None
def forward_signal(signum: int, frame: FrameType | None) -> None:
if sys.platform == "win32" and signum == signal.SIGINT:
@@ -285,12 +287,7 @@ class TaskRunner:
return code
composite_code = code
return composite_code
- return self._run_process(
- args,
- chdir=True,
- shell=shell,
- **exec_opts(self.global_options, options, opts),
- )
+ return self._run_process(args, chdir=True, shell=shell, **exec_opts(self.global_options, options, opts))
def run(self, command: str, args: list[str], opts: TaskOptions | None = None, chdir: bool = False) -> int:
if command in self.hooks.skip:
@@ -312,11 +309,7 @@ class TaskRunner:
self.hooks.try_emit("post_script", script=command, args=args)
return code
else:
- return self._run_process(
- [command, *args],
- chdir=chdir,
- **exec_opts(self.global_options, opts),
- )
+ return self._run_process([command, *args], chdir=chdir, **exec_opts(self.global_options, opts))
def show_list(self) -> None:
if not self.project.scripts:
diff --git a/src/pdm/termui.py b/src/pdm/termui.py
index 22eee13c..2984571e 100644
--- a/src/pdm/termui.py
+++ b/src/pdm/termui.py
@@ -8,6 +8,7 @@ import tempfile
import warnings
from typing import TYPE_CHECKING
+import rich
from rich.box import ROUNDED
from rich.console import Console
from rich.progress import Progress, ProgressColumn
@@ -36,21 +37,21 @@ DEFAULT_THEME = {
"info": "blue",
"req": "bold green",
}
-_console = Console(highlight=False, theme=Theme(DEFAULT_THEME))
+rich.reconfigure(highlight=False, theme=Theme(DEFAULT_THEME))
_err_console = Console(stderr=True, theme=Theme(DEFAULT_THEME))
def is_interactive(console: Console | None = None) -> bool:
"""Check if the terminal is run under interactive mode"""
if console is None:
- console = _console
+ console = rich.get_console()
return console.is_interactive
def is_legacy_windows(console: Console | None = None) -> bool:
"""Legacy Windows renderer may have problem rendering emojis"""
if console is None:
- console = _console
+ console = rich.get_console()
return console.legacy_windows
@@ -61,6 +62,7 @@ def style(text: str, *args: str, style: str | None = None, **kwargs: Any) -> str
:param style: rich style to apply to whole string
:return: string containing ansi codes
"""
+ _console = rich.get_console()
if _console.legacy_windows or not _console.is_terminal: # pragma: no cover
return text
with _console.capture() as capture:
@@ -176,7 +178,7 @@ class UI:
:param theme: dict of theme
"""
- _console.push_theme(theme)
+ rich.get_console().push_theme(theme)
_err_console.push_theme(theme)
def echo(
@@ -193,7 +195,7 @@ class UI:
:param verbosity: verbosity level, defaults to QUIET.
"""
if self.verbosity >= verbosity:
- console = _err_console if err else _console
+ console = _err_console if err else rich.get_console()
if not console.is_interactive:
kwargs.setdefault("crop", False)
kwargs.setdefault("overflow", "ignore")
@@ -223,7 +225,7 @@ class UI:
for row in rows:
table.add_row(*row)
- _console.print(table)
+ rich.print(table)
@contextlib.contextmanager
def logging(self, type_: str = "install") -> Iterator[logging.Logger]:
@@ -276,12 +278,7 @@ class UI:
def make_progress(self, *columns: str | ProgressColumn, **kwargs: Any) -> Progress:
"""create a progress instance for indented spinners"""
- return Progress(
- *columns,
- console=_console,
- disable=self.verbosity >= Verbosity.DETAIL,
- **kwargs,
- )
+ return Progress(*columns, disable=self.verbosity >= Verbosity.DETAIL, **kwargs)
def info(self, message: str, verbosity: Verbosity = Verbosity.QUIET) -> None:
"""Print a message to stdout."""
|
pdm-project/pdm
|
1fac8c911e663bbee22f2151d8e7d8d8ab4ae6e5
|
diff --git a/tests/cli/test_run.py b/tests/cli/test_run.py
index e2d4c4d9..6e1e4c03 100644
--- a/tests/cli/test_run.py
+++ b/tests/cli/test_run.py
@@ -900,3 +900,15 @@ def test_empty_positional_args_display_help(project, pdm):
assert "Usage:" in result.output
assert "Commands:" in result.output
assert "Options:" in result.output
+
+
+def test_run_script_changing_working_dir(project, pdm, capfd):
+ project.root.joinpath("subdir").mkdir()
+ project.root.joinpath("subdir", "file.text").write_text("Hello world\n")
+ project.pyproject.settings["scripts"] = {
+ "test_script": {"working_dir": "subdir", "cmd": "cat file.text"},
+ }
+ project.pyproject.write()
+ capfd.readouterr()
+ pdm(["run", "test_script"], obj=project, strict=True)
+ assert capfd.readouterr()[0].strip() == "Hello world"
|
Add an option for PDM's scripts to set the current working directory
## Is your feature/enhancement proposal related to a problem? Please describe.
This issue suggest adding a new key for commands defined under `tool.pdm.scripts`, to set the current working directory before running the given action.
Consider the following project structure:
```
my_project/
├─ docs/
│ ├─ source/
│ ├─ Makefile
│ ├─ make.bat
├─ src/
│ ├─ my_package/
├─ pyproject.toml
```
This is a recurring structure given Sphinx's proposed setup.
There are commands that can only be ran in the `docs/` directory, due to the nature of `make` that uses the `Makefile` available at the root of the current working directory.
## Describe the solution you'd like
This key, which could be called `cwd`, or `in_path`, or any given suggestion, would provide an OS-agnostic way to set the current working directory.
When a command will be running, and this property is set with a relative path (In this example `./docs`), PDM should come back to the root of the project (`pdm info --where`), so that moving in directories is seamless, and would not surprise us in case our current working directory was in `src/my_package`, for example.
Alternatively, this could be done by using operators like `&&` in a shell script, but such operator (In my knowledge) could be different depending on the OS used.
Adding such key would, in my opinion, increase confidence when creating new scripts that require to move to specific directories.
- - -
I am willing to work on this issue and submit a PR.
|
0.0
|
1fac8c911e663bbee22f2151d8e7d8d8ab4ae6e5
|
[
"tests/cli/test_run.py::test_run_script_changing_working_dir"
] |
[
"tests/cli/test_run.py::test_auto_isolate_site_packages",
"tests/cli/test_run.py::test_run_with_site_packages",
"tests/cli/test_run.py::test_run_command_not_found",
"tests/cli/test_run.py::test_run_pass_exit_code",
"tests/cli/test_run.py::test_run_cmd_script",
"tests/cli/test_run.py::test_run_cmd_script_with_array",
"tests/cli/test_run.py::test_run_script_pass_project_root",
"tests/cli/test_run.py::test_run_shell_script",
"tests/cli/test_run.py::test_run_script_with_relative_path",
"tests/cli/test_run.py::test_run_non_existing_local_script",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder[with-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder[without-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder_with_default[with-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder_with_default[with-default]",
"tests/cli/test_run.py::test_run_call_script",
"tests/cli/test_run.py::test_run_script_with_extra_args",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-str-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-str-without-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-list-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-list-without-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-str-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-str-default]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-list-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-list-default]",
"tests/cli/test_run.py::test_run_shell_script_with_pdm_placeholder",
"tests/cli/test_run.py::test_run_expand_env_vars",
"tests/cli/test_run.py::test_run_expand_env_vars_from_config",
"tests/cli/test_run.py::test_run_script_with_env_defined",
"tests/cli/test_run.py::test_run_script_with_dotenv_file",
"tests/cli/test_run.py::test_run_script_override_global_env",
"tests/cli/test_run.py::test_run_show_list_of_scripts",
"tests/cli/test_run.py::test_run_show_list_of_scripts_hide_internals",
"tests/cli/test_run.py::test_run_json_list_of_scripts",
"tests/cli/test_run.py::test_import_another_sitecustomize",
"tests/cli/test_run.py::test_run_with_patched_sysconfig",
"tests/cli/test_run.py::test_run_composite",
"tests/cli/test_run.py::test_composite_stops_on_first_failure",
"tests/cli/test_run.py::test_composite_keep_going_on_failure",
"tests/cli/test_run.py::test_composite_inherit_env",
"tests/cli/test_run.py::test_composite_fail_on_first_missing_task",
"tests/cli/test_run.py::test_composite_runs_all_hooks",
"tests/cli/test_run.py::test_composite_pass_parameters_to_subtasks",
"tests/cli/test_run.py::test_composite_can_pass_parameters",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args[with-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args[without-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args_with_default[with-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args_with_default[default]",
"tests/cli/test_run.py::test_composite_hooks_inherit_env",
"tests/cli/test_run.py::test_composite_inherit_env_in_cascade",
"tests/cli/test_run.py::test_composite_inherit_dotfile",
"tests/cli/test_run.py::test_composite_can_have_commands",
"tests/cli/test_run.py::test_run_shortcut",
"tests/cli/test_run.py::test_run_shortcuts_dont_override_commands",
"tests/cli/test_run.py::test_run_shortcut_fail_with_usage_if_script_not_found",
"tests/cli/test_run.py::test_empty_positionnal_args_still_display_usage[unknown",
"tests/cli/test_run.py::test_empty_positionnal_args_still_display_usage[not",
"tests/cli/test_run.py::test_empty_positional_args_display_help"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-15 01:52:16+00:00
|
mit
| 4,505 |
|
pdm-project__pdm-2817
|
diff --git a/news/2816.bugfix.md b/news/2816.bugfix.md
new file mode 100644
index 00000000..9123b053
--- /dev/null
+++ b/news/2816.bugfix.md
@@ -0,0 +1,1 @@
+Don't cache editable installations.
diff --git a/src/pdm/installers/manager.py b/src/pdm/installers/manager.py
index 1835cfba..2ad07e54 100644
--- a/src/pdm/installers/manager.py
+++ b/src/pdm/installers/manager.py
@@ -33,7 +33,7 @@ class InstallManager:
prepared.build(),
self.environment,
direct_url=prepared.direct_url(),
- install_links=self.use_install_cache,
+ install_links=self.use_install_cache and not candidate.req.editable,
rename_pth=self.rename_pth,
)
return Distribution.at(dist_info)
|
pdm-project/pdm
|
a1b0cb24d51cfd4ab3760c86e432416f5bb6f7b3
|
diff --git a/tests/test_installer.py b/tests/test_installer.py
index 451b6c70..60501b11 100644
--- a/tests/test_installer.py
+++ b/tests/test_installer.py
@@ -2,6 +2,7 @@ from __future__ import annotations
import logging
import os
+from pathlib import Path
from typing import Callable
import pytest
@@ -167,6 +168,35 @@ def test_url_requirement_is_not_cached(project):
assert dist.read_text("direct_url.json")
+def test_editable_is_not_cached(project, tmp_path_factory):
+ editable_path: Path = tmp_path_factory.mktemp("editable-project")
+
+ editable_setup = editable_path / "setup.py"
+ editable_setup.write_text("""
+from setuptools import setup
+
+setup(name='editable-project',
+ version='0.1.0',
+ description='',
+ py_modules=['module'],
+)
+""")
+ editable_module = editable_path / "module.py"
+ editable_module.write_text("")
+
+ req = parse_requirement(f"file://{editable_path}#egg=editable-project", True)
+ candidate = Candidate(req)
+ installer = InstallManager(project.environment, use_install_cache=True)
+ installer.install(candidate)
+
+ cache_path = project.cache("packages") / "editable_project-0.1.0-0.editable-py3-none-any.whl.cache"
+ assert not cache_path.is_dir()
+ lib_path = Path(project.environment.get_paths()["purelib"])
+ for pth in lib_path.glob("*editable_project*.pth"):
+ assert pth.is_file()
+ assert not pth.is_symlink()
+
+
@pytest.mark.parametrize("use_install_cache", [False, True])
def test_install_wheel_with_data_scripts(project, use_install_cache):
req = parse_requirement("jmespath")
|
Editable install shouldn't be cached
- [x] I have searched the issue tracker and believe that this is not a duplicate.
**Make sure you run commands with `-v` flag before pasting the output.**
## Steps to reproduce
There are multiple reproducible issues with editable caching. I hit multiple since the 2.14 release, here's the main cases
### Different projects, same name
Just create 2 different `pdm` self-installable projects (`distribution=true`) with the same name (`test-project` or `demo` are realistic candidates) without using git.
The first project editable self-install will be OK.
The 2nd one will have the same cache key (`demo-0.1.0-0.editable-py3-none-any.whl.cache`) and instead of being self installed, it will be linked to the first one (`import demo` import project 1 instead of 2).
### Same project, multiple clones
Clone twice the same installable project (general case will be `HEAD=main`) and `pdm install` them.
Same result as the previous case, the 2nd one is linked to the first one. If you change branch on the first project, editable install version won't change and 2nd project will silently have the 1st project version.
### Self or dependency
The same cases occur for both self-install or for editable dependencies.
## Actual behavior
<!--A clear and concise description the result of the above steps-->
Editable installs packages are cached leading self-installs or multiple installs in different envs collide and then to be mixed
## Expected behavior
<!--A clear and concise description of what you expected to happen.-->
Editable installs are not cached and can be used across multiple projects without collisions and mixups
## Environment Information
```bash
PDM version:
2.14.0
Python Interpreter:
/home/noirbizarre/Workspaces/python.copier/.venv/bin/python (3.11)
Project Root:
/home/noirbizarre/Workspaces/python.copier
Local Packages:
{
"implementation_name": "cpython",
"implementation_version": "3.11.8",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "6.8.5-arch1-1",
"platform_system": "Linux",
"platform_version": "#1 SMP PREEMPT_DYNAMIC Thu, 11 Apr 2024 01:47:33 +0000",
"python_full_version": "3.11.8",
"platform_python_implementation": "CPython",
"python_version": "3.11",
"sys_platform": "linux"
}
```
|
0.0
|
a1b0cb24d51cfd4ab3760c86e432416f5bb6f7b3
|
[
"tests/test_installer.py::test_editable_is_not_cached"
] |
[
"tests/test_installer.py::test_install_wheel_with_inconsistent_dist_info",
"tests/test_installer.py::test_install_with_file_existing",
"tests/test_installer.py::test_uninstall_commit_rollback",
"tests/test_installer.py::test_rollback_after_commit",
"tests/test_installer.py::test_uninstall_with_console_scripts[False]",
"tests/test_installer.py::test_uninstall_with_console_scripts[True]",
"tests/test_installer.py::test_install_wheel_with_cache[symlink]",
"tests/test_installer.py::test_install_wheel_with_cache[hardlink]",
"tests/test_installer.py::test_install_wheel_with_cache[None]",
"tests/test_installer.py::test_url_requirement_is_not_cached",
"tests/test_installer.py::test_install_wheel_with_data_scripts[False]",
"tests/test_installer.py::test_install_wheel_with_data_scripts[True]",
"tests/test_installer.py::test_compress_file_list_for_rename"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-04-15 09:47:52+00:00
|
mit
| 4,506 |
|
pdm-project__pdm-2850
|
diff --git a/news/2849.bugfix.md b/news/2849.bugfix.md
new file mode 100644
index 00000000..3bef6128
--- /dev/null
+++ b/news/2849.bugfix.md
@@ -0,0 +1,1 @@
+Fix env and other options being inherited in nested composite scripts.
\ No newline at end of file
diff --git a/src/pdm/cli/commands/run.py b/src/pdm/cli/commands/run.py
index 69f64f24..06d0891e 100644
--- a/src/pdm/cli/commands/run.py
+++ b/src/pdm/cli/commands/run.py
@@ -277,6 +277,8 @@ class TaskRunner:
should_interpolate = should_interpolate or any(RE_PDM_PLACEHOLDER.search(script) for script in value)
composite_code = 0
keep_going = options.pop("keep_going", False) if options else False
+ if opts:
+ cast(dict, options).update(**exec_opts(options, opts))
for script in value:
if should_interpolate:
script, _ = interpolate(script, args)
|
pdm-project/pdm
|
ddaa420aabc5708d2adbd14e2aca0ec5433a6fd0
|
diff --git a/tests/cli/test_run.py b/tests/cli/test_run.py
index e97e5c0f..4b4e4af5 100644
--- a/tests/cli/test_run.py
+++ b/tests/cli/test_run.py
@@ -640,7 +640,15 @@ def test_composite_inherit_env(project, pdm, capfd, _echo):
"cmd": "python echo.py Second VAR",
"env": {"VAR": "42"},
},
- "test": {"composite": ["first", "second"], "env": {"VAR": "overriden"}},
+ "nested": {
+ "composite": ["third"],
+ "env": {"VAR": "42"},
+ },
+ "third": {
+ "cmd": "python echo.py Third VAR",
+ "env": {"VAR": "42"},
+ },
+ "test": {"composite": ["first", "second", "nested"], "env": {"VAR": "overriden"}},
}
project.pyproject.write()
capfd.readouterr()
@@ -648,6 +656,7 @@ def test_composite_inherit_env(project, pdm, capfd, _echo):
out, _ = capfd.readouterr()
assert "First CALLED with VAR=overriden" in out
assert "Second CALLED with VAR=overriden" in out
+ assert "Third CALLED with VAR=overriden" in out
def test_composite_fail_on_first_missing_task(project, pdm, capfd, _echo):
|
ENV is not inherited in nested composite scripts
- [x] I have searched the issue tracker and believe that this is not a duplicate.
Environment variables are inherited from composite scripts to regular commands, but not between nested composite scripts.
## Steps to reproduce
<!--Describe the minimized example of how to reproduce the bug-->
- Create three scripts:
- `test` composite, which calls ...
- `nested` composite, which calls ...
- `foo` script
- Scripts `test` and `foo` define different values for `env.VAR`
- optional: `nested` also defines a value for `env.VAR`
- Call the `test` script: `pdm run -v test`
```toml
[tool.pdm.scripts]
test.composite = ["nested"]
test.env.VAR = "test"
nested.composite = ["foo"]
# nested.env.VAR = "nested"
foo.cmd = "python -c \"print('${VAR}')\""
foo.env.VAR = "bar"
```
## Actual behavior
<!--A clear and concise description the result of the above steps-->
Running `test` when `env.VAR` is not overridden in `nested`:
```shell
Running <task test>: []
Running <task nested>: []
Running <task foo>: ['python', '-c', "print('${VAR}')"]
bar
```
In this case, `VAR` gets the original value defined in `foo`.
Running `test` after uncommenting `nested.env.VAR = "nested"`:
```shell
Running <task test>: []
Running <task nested>: []
Running <task foo>: ['python', '-c', "print('${VAR}')"]
nested
```
Now, we see the overridden value defined in `nested`.
## Expected behavior
<!--A clear and concise description of what you expected to happen.-->
In both cases, I would expect `VAR` to be `test`, as defined in the `test` script.
## Environment Information
```bash
PDM version:
2.15.0
Python Interpreter:
redacted/.venv/bin/python (3.12)
Project Root:
redacted
Local Packages:
{
"implementation_name": "cpython",
"implementation_version": "3.12.0",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_release": "5.15.133.1-microsoft-standard-WSL2",
"platform_system": "Linux",
"platform_version": "#1 SMP Thu Oct 5 21:02:42 UTC 2023",
"python_full_version": "3.12.0",
"platform_python_implementation": "CPython",
"python_version": "3.12",
"sys_platform": "linux"
}
```
|
0.0
|
ddaa420aabc5708d2adbd14e2aca0ec5433a6fd0
|
[
"tests/cli/test_run.py::test_composite_inherit_env"
] |
[
"tests/cli/test_run.py::test_auto_isolate_site_packages",
"tests/cli/test_run.py::test_run_with_site_packages",
"tests/cli/test_run.py::test_run_command_not_found",
"tests/cli/test_run.py::test_run_pass_exit_code",
"tests/cli/test_run.py::test_run_cmd_script",
"tests/cli/test_run.py::test_run_cmd_script_with_array",
"tests/cli/test_run.py::test_run_script_pass_project_root",
"tests/cli/test_run.py::test_run_shell_script",
"tests/cli/test_run.py::test_run_script_with_relative_path",
"tests/cli/test_run.py::test_run_non_existing_local_script",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder[with-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder[without-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder_with_default[with-args]",
"tests/cli/test_run.py::test_run_shell_script_with_args_placeholder_with_default[with-default]",
"tests/cli/test_run.py::test_run_call_script",
"tests/cli/test_run.py::test_run_script_with_extra_args",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-str-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-str-without-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-list-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder[as-list-without-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-str-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-str-default]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-list-with-args]",
"tests/cli/test_run.py::test_run_script_with_args_placeholder_with_default[as-list-default]",
"tests/cli/test_run.py::test_run_shell_script_with_pdm_placeholder",
"tests/cli/test_run.py::test_run_expand_env_vars",
"tests/cli/test_run.py::test_run_expand_env_vars_from_config",
"tests/cli/test_run.py::test_run_script_with_env_defined",
"tests/cli/test_run.py::test_run_script_with_dotenv_file",
"tests/cli/test_run.py::test_run_script_override_global_env",
"tests/cli/test_run.py::test_run_show_list_of_scripts",
"tests/cli/test_run.py::test_run_show_list_of_scripts_hide_internals",
"tests/cli/test_run.py::test_run_json_list_of_scripts",
"tests/cli/test_run.py::test_import_another_sitecustomize",
"tests/cli/test_run.py::test_run_with_patched_sysconfig",
"tests/cli/test_run.py::test_run_composite",
"tests/cli/test_run.py::test_composite_stops_on_first_failure",
"tests/cli/test_run.py::test_composite_keep_going_on_failure",
"tests/cli/test_run.py::test_composite_fail_on_first_missing_task",
"tests/cli/test_run.py::test_composite_fails_on_recursive_script",
"tests/cli/test_run.py::test_composite_runs_all_hooks",
"tests/cli/test_run.py::test_composite_pass_parameters_to_subtasks",
"tests/cli/test_run.py::test_composite_can_pass_parameters",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args[with-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args[without-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args_with_default[with-args]",
"tests/cli/test_run.py::test_composite_only_pass_parameters_to_subtasks_with_args_with_default[default]",
"tests/cli/test_run.py::test_composite_hooks_inherit_env",
"tests/cli/test_run.py::test_composite_inherit_env_in_cascade",
"tests/cli/test_run.py::test_composite_inherit_dotfile",
"tests/cli/test_run.py::test_composite_can_have_commands",
"tests/cli/test_run.py::test_run_shortcut",
"tests/cli/test_run.py::test_run_shortcuts_dont_override_commands",
"tests/cli/test_run.py::test_run_shortcut_fail_with_usage_if_script_not_found",
"tests/cli/test_run.py::test_empty_positionnal_args_still_display_usage[unknown",
"tests/cli/test_run.py::test_empty_positionnal_args_still_display_usage[not",
"tests/cli/test_run.py::test_empty_positional_args_display_help",
"tests/cli/test_run.py::test_run_script_changing_working_dir"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-04-26 02:00:40+00:00
|
mit
| 4,507 |
|
pdm-project__pdm-2858
|
diff --git a/news/2853.bugfix.md b/news/2853.bugfix.md
new file mode 100644
index 00000000..c3541790
--- /dev/null
+++ b/news/2853.bugfix.md
@@ -0,0 +1,1 @@
+Make `direct_minimal_versions` work on newly added dependencies.
diff --git a/src/pdm/cli/commands/add.py b/src/pdm/cli/commands/add.py
index 5f56bc52..df4d2ce1 100644
--- a/src/pdm/cli/commands/add.py
+++ b/src/pdm/cli/commands/add.py
@@ -155,6 +155,8 @@ class Command(BaseCommand):
reqs = [
r for g, deps in all_dependencies.items() if lock_groups is None or g in lock_groups for r in deps.values()
]
+ # pre-write the dependencies to the pyproject.toml to make them recognized by the resolver
+ project.add_dependencies(requirements, group, selection.dev or False, write=False)
with hooks.skipping("post_lock"):
resolved = do_lock(
project,
diff --git a/src/pdm/project/core.py b/src/pdm/project/core.py
index fa3a3ea1..89014ebb 100644
--- a/src/pdm/project/core.py
+++ b/src/pdm/project/core.py
@@ -570,6 +570,7 @@ class Project:
to_group: str = "default",
dev: bool = False,
show_message: bool = True,
+ write: bool = True,
) -> None:
deps, setter = self.use_pyproject_dependencies(to_group, dev)
for _, dep in requirements.items():
@@ -583,7 +584,8 @@ class Project:
else:
deps[matched_index] = req
setter(cast(Array, deps).multiline(True))
- self.pyproject.write(show_message)
+ if write:
+ self.pyproject.write(show_message)
def init_global_project(self) -> None:
if not self.is_global or not self.pyproject.empty():
|
pdm-project/pdm
|
d5acc2700f9495325411ff10b0e4971b19c296fc
|
diff --git a/tests/cli/test_add.py b/tests/cli/test_add.py
index 564938e7..22a5270d 100644
--- a/tests/cli/test_add.py
+++ b/tests/cli/test_add.py
@@ -256,6 +256,7 @@ def test_add_cached_vcs_requirement(project, mocker, pdm):
@pytest.mark.usefixtures("repository")
def test_add_with_dry_run(project, pdm):
result = pdm(["add", "--dry-run", "requests"], obj=project, strict=True)
+ project.pyproject.reload()
assert not project.get_dependencies()
assert "requests 2.19.1" in result.stdout
assert "urllib3 1.22" in result.stdout
@@ -330,3 +331,14 @@ def test_add_disable_cache(project, pdm, working_set):
files = [file for file in cache_dir.rglob("*") if file.is_file()]
assert not files
+
+
[email protected]("working_set")
+def test_add_dependency_with_direct_minimal_versions(project, pdm, repository):
+ pdm(["lock", "-S", "direct_minimal_versions"], obj=project, strict=True)
+ repository.add_candidate("pytz", "2019.6")
+ pdm(["add", "django"], obj=project, strict=True)
+ all_candidates = project.locked_repository.all_candidates
+ assert "django>=1.11.8" in project.pyproject.metadata["dependencies"]
+ assert all_candidates["django"].version == "1.11.8"
+ assert all_candidates["pytz"].version == "2019.6"
|
Inconsistent behaviour of `pdm add` in mode direct_minimal_versions
- [ x] I have searched the issue tracker and believe that this is not a duplicate.
## Steps to reproduce
1. Create an empty project, with no dependencies
2. run `pdm lock --strategy direct_minimal_versions` to activate the locking strategy
3. run `pdm add django`. This will modify the pyproject file to install the latest compatible django package (-> `django>=4.2.11` for python 3.9)
4. run again `pdm add django`. The pyproject file will now be modified to install the earliest compatible django package (-> `django>=1.1.3`)
## Expected behavior
* Running `pdm add` a second time should have no effect
## Environment Information
```bash
# Paste the output of `pdm info && pdm info --env` below:
PDM version:
2.15.1
Python Interpreter:
D:\tmp\test_pdm\.venv\Scripts\python.exe (3.9)
Project Root:
D:/tmp/test_pdm
Local Packages:
{
"implementation_name": "cpython",
"implementation_version": "3.9.13",
"os_name": "nt",
"platform_machine": "AMD64",
"platform_release": "10",
"platform_system": "Windows",
"platform_version": "10.0.19045",
"python_full_version": "3.9.13",
"platform_python_implementation": "CPython",
"python_version": "3.9",
"sys_platform": "win32"
}
```
## Discussion
* One could wonder whether `pdm add` should look for the latest or earliest compatible version on its first call. IMHO, the latest version should be sought, even in direct_minimal_versions strategy, since that strategy specifies how the lock file consistency with the pyproject file is enforced, not how the pyproject file is specified
* Another issue is what `pdm add` should do with respect to what is already specified in the pyproject file. The documentation doesn't say much about the policies followed by PDM if `pdm add` is called for an already-specified dependency
* When reading the documentation, it is not obvious how to reliably increase the lower bounds of the dependencies specified in the pyproject file with PDM. Should we always first edit the pyproject file and then run `pdm install`?
Thanks for the awesome work done on PDM!
|
0.0
|
d5acc2700f9495325411ff10b0e4971b19c296fc
|
[
"tests/cli/test_add.py::test_add_dependency_with_direct_minimal_versions"
] |
[
"tests/cli/test_add.py::test_add_package[False]",
"tests/cli/test_add.py::test_add_package[True]",
"tests/cli/test_add.py::test_add_package_no_lock[False]",
"tests/cli/test_add.py::test_add_package_no_lock[True]",
"tests/cli/test_add.py::test_add_command",
"tests/cli/test_add.py::test_add_package_to_custom_group",
"tests/cli/test_add.py::test_add_package_to_custom_dev_group",
"tests/cli/test_add.py::test_add_editable_package",
"tests/cli/test_add.py::test_add_editable_package_to_metadata_forbidden",
"tests/cli/test_add.py::test_non_editable_override_editable",
"tests/cli/test_add.py::test_add_remote_package_url[False]",
"tests/cli/test_add.py::test_add_remote_package_url[True]",
"tests/cli/test_add.py::test_add_no_install",
"tests/cli/test_add.py::test_add_package_save_exact",
"tests/cli/test_add.py::test_add_package_save_wildcard",
"tests/cli/test_add.py::test_add_package_save_minimum",
"tests/cli/test_add.py::test_add_package_update_reuse",
"tests/cli/test_add.py::test_add_package_update_eager",
"tests/cli/test_add.py::test_add_package_with_mismatch_marker",
"tests/cli/test_add.py::test_add_dependency_from_multiple_parents",
"tests/cli/test_add.py::test_add_packages_without_self",
"tests/cli/test_add.py::test_add_package_unconstrained_rewrite_specifier",
"tests/cli/test_add.py::test_add_cached_vcs_requirement",
"tests/cli/test_add.py::test_add_with_dry_run",
"tests/cli/test_add.py::test_add_with_prerelease",
"tests/cli/test_add.py::test_add_editable_package_with_extras",
"tests/cli/test_add.py::test_add_package_with_local_version",
"tests/cli/test_add.py::test_add_group_to_lockfile",
"tests/cli/test_add.py::test_add_group_to_lockfile_without_package",
"tests/cli/test_add.py::test_add_update_reuse_installed",
"tests/cli/test_add.py::test_add_update_reuse_installed_config",
"tests/cli/test_add.py::test_add_disable_cache"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-04-29 01:35:21+00:00
|
mit
| 4,508 |
|
pdpipe__pdpipe-90
|
diff --git a/.gitignore b/.gitignore
index ca9049f..8b436c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -104,3 +104,6 @@ tags
# don't like pipfiles
Pipfile
Pipfile.lock
+
+# Temporary testing file
+notebooks/debug*.py
diff --git a/pdpipe/core.py b/pdpipe/core.py
index 0eb75f8..36ee8c8 100644
--- a/pdpipe/core.py
+++ b/pdpipe/core.py
@@ -810,7 +810,8 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
raise NotImplementedError
def _post_transform_lock(self):
- self.application_context.lock()
+ # Application context is discarded after pipeline application
+ self.application_context = None
self.fit_context.lock()
def apply(
@@ -819,7 +820,8 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
exraise: Optional[bool] = None,
verbose: Optional[bool] = False,
time: Optional[bool] = False,
- context: Optional[dict] = {},
+ fit_context: Optional[dict] = {},
+ application_context: Optional[dict] = {},
):
"""Applies this pipeline stage to the given dataframe.
@@ -844,10 +846,14 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
time : bool, default False
If True, per-stage application time is measured and reported when
pipeline application is done.
- context : dict, optional
+ fit_context : dict, option
+ Context for the entire pipeline, is retained after the pipeline
+ application is completed.
+ application_context : dict, optional
Context to add to the application context of this call. Can map
str keys to arbitrary object values to be used by pipeline stages
- during this pipeline application.
+ during this pipeline application. Discarded after pipeline
+ application.
Returns
-------
@@ -860,7 +866,7 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
exraise=exraise,
verbose=verbose,
time=time,
- context=context,
+ application_context=application_context,
)
return res
res = self.fit_transform(
@@ -868,7 +874,8 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
exraise=exraise,
verbose=verbose,
time=time,
- context=context,
+ fit_context=fit_context,
+ application_context=application_context,
)
return res
@@ -878,12 +885,13 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
y: Optional[Iterable] = None,
exraise: Optional[bool] = None,
verbose: Optional[bool] = False,
- context: Optional[dict] = {},
+ fit_context: Optional[dict] = {},
+ application_context: Optional[dict] = {},
):
self.fit_context = PdpApplicationContext()
- self.fit_context.update(context)
+ self.fit_context.update(fit_context)
self.application_context = PdpApplicationContext()
- self.application_context.update(context)
+ self.application_context.update(application_context)
inter_x = X
times = []
prev = time.time()
@@ -918,7 +926,8 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
exraise: Optional[bool] = None,
verbose: Optional[bool] = False,
time: Optional[bool] = False,
- context: Optional[dict] = {},
+ fit_context: Optional[dict] = {},
+ application_context: Optional[dict] = {},
):
"""Fits this pipeline and transforms the input dataframe.
@@ -942,10 +951,14 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
time : bool, default False
If True, per-stage application time is measured and reported when
pipeline application is done.
- context : dict, optional
+ fit_context : dict, option
+ Context for the entire pipeline, is retained after the pipeline
+ application is completed.
+ application_context : dict, optional
Context to add to the application context of this call. Can map
str keys to arbitrary object values to be used by pipeline stages
- during this pipeline application.
+ during this pipeline application. Discarded after pipeline
+ application.
Returns
-------
@@ -954,12 +967,15 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
"""
if time:
return self.__timed_fit_transform(
- X=X, y=y, exraise=exraise, verbose=verbose, context=context)
+ X=X, y=y, exraise=exraise,
+ verbose=verbose,
+ fit_context=fit_context,
+ application_context=application_context)
inter_x = X
self.application_context = PdpApplicationContext()
- self.application_context.update(context)
+ self.application_context.update(application_context)
self.fit_context = PdpApplicationContext()
- self.fit_context.update(context)
+ self.fit_context.update(fit_context)
for i, stage in enumerate(self._stages):
try:
stage.fit_context = self.fit_context
@@ -985,7 +1001,8 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
exraise: Optional[bool] = None,
verbose: Optional[bool] = False,
time: Optional[bool] = False,
- context: Optional[dict] = {},
+ fit_context: Optional[dict] = {},
+ application_context: Optional[dict] = {},
):
"""Fits this pipeline without transforming the input dataframe.
@@ -1009,7 +1026,10 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
time : bool, default False
If True, per-stage application time is measured and reported when
pipeline application is done.
- context : dict, optional
+ fit_context : dict, option
+ Context for the entire pipeline, is retained after the pipeline
+ application is completed.
+ application_context : dict, optional
Context to add to the application context of this call. Can map
str keys to arbitrary object values to be used by pipeline stages
during this pipeline application.
@@ -1025,7 +1045,8 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
exraise=exraise,
verbose=verbose,
time=time,
- context=context,
+ fit_context=fit_context,
+ application_context=application_context,
)
return X
@@ -1035,13 +1056,13 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
y: Optional[Iterable[float]] = None,
exraise: Optional[bool] = None,
verbose: Optional[bool] = None,
- context: Optional[dict] = {},
+ application_context: Optional[dict] = {},
) -> pandas.DataFrame:
inter_x = X
times = []
prev = time.time()
self.application_context = PdpApplicationContext()
- self.application_context.update(context)
+ self.application_context.update(application_context)
for i, stage in enumerate(self._stages):
try:
stage.fit_context = self.fit_context
@@ -1073,7 +1094,7 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
exraise: Optional[bool] = None,
verbose: Optional[bool] = None,
time: Optional[bool] = False,
- context: Optional[dict] = {},
+ application_context: Optional[dict] = {},
) -> pandas.DataFrame:
"""Transforms the given dataframe without fitting this pipeline.
@@ -1100,7 +1121,7 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
time : bool, default False
If True, per-stage application time is measured and reported when
pipeline application is done.
- context : dict, optional
+ application_context : dict, optional
Context to add to the application context of this call. Can map
str keys to arbitrary object values to be used by pipeline stages
during this pipeline application.
@@ -1117,12 +1138,14 @@ class PdPipeline(PdPipelineStage, collections.abc.Sequence):
" unfitted!").format(stage))
if time:
return self.__timed_transform(
- X=X, y=y, exraise=exraise, verbose=verbose, context=context)
+ X=X, y=y, exraise=exraise, verbose=verbose,
+ application_context=application_context)
inter_df = X
self.application_context = PdpApplicationContext()
- self.application_context.update(context)
+ self.application_context.update(application_context)
for i, stage in enumerate(self._stages):
try:
+ stage.fit_context = self.fit_context
stage.application_context = self.application_context
inter_df = stage.transform(
X=inter_df,
|
pdpipe/pdpipe
|
9d04fdd75736e37d576eebb575b28f4df667e9d8
|
diff --git a/tests/core/test_app_context.py b/tests/core/test_app_context.py
index 947aa77..f6e0fb7 100644
--- a/tests/core/test_app_context.py
+++ b/tests/core/test_app_context.py
@@ -132,7 +132,7 @@ def test_application_context_injection():
assert len(pipeline) == 2
df = _test_df()
val = randint(840, 921)
- res_df = pipeline.apply(df, verbose=True, context={'a': val})
+ res_df = pipeline.apply(df, verbose=True, fit_context={'a': val})
assert 'num1' in res_df.columns
assert 'num1+val' in res_df.columns
assert 'num2' in res_df.columns
|
Application context objects should not be kept by default + add way to supply fit context
pdpipe uses `PdpApplicationContext` objects in two ways:
1. As the `fit_context` that should be kept as-is after a fit, and used by stages to pass to one another parameters that should also be used on transform time.
2. As the `application_context` that should be discarded after a specific application is done, and is used by stages to feed consecutive stages with context. It can be added to by supplying `apply(context={})`, `fit_transform(context={})` or `transform(context={})` with a `dict` that will be used to update the application context.
Two changes are required:
1. At the moment there is a single `context` parameter to application functions that is used to update both the fit and the application context. I think they should be two, one for each type of context.
2. At the moment the `application_context` is not discarded when the application is done. It's as simple as `self.application_context = None` expression added at the `PdPipeline` level in the couple of right cases.
|
0.0
|
9d04fdd75736e37d576eebb575b28f4df667e9d8
|
[
"tests/core/test_app_context.py::test_application_context_injection"
] |
[
"tests/core/test_app_context.py::test_application_context",
"tests/core/test_app_context.py::test_application_context_pickling",
"tests/core/test_app_context.py::test_application_context_unit",
"tests/core/test_app_context.py::test_context_with_adhoc_stage"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-02-22 08:11:03+00:00
|
mit
| 4,509 |
|
pedrokiefer__aiostomp-17
|
diff --git a/aiostomp/protocol.py b/aiostomp/protocol.py
index 6936ae2..6b3edfb 100644
--- a/aiostomp/protocol.py
+++ b/aiostomp/protocol.py
@@ -70,7 +70,10 @@ class StompProtocol(object):
def _parse_data(self, data):
if not self._intermediate_frame:
- command, data = data.split(b'\n', 1)
+ try:
+ command, data = data.split(b'\n', 1)
+ except ValueError:
+ return None, data
command = self._decode(command)
self._intermediate_frame = {'command': command}
|
pedrokiefer/aiostomp
|
3b9097a96aa84c5d78fdff15bea3a267761733a4
|
diff --git a/tests/test_main.py b/tests/test_main.py
index cd38b84..06a8540 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -275,6 +275,52 @@ class TestStompReader(AsyncTestCase):
await asyncio.sleep(0.001)
message_handle_mock.assert_called_once()
+ @patch('aiostomp.aiostomp.StompReader._handle_message')
+ @unittest_run_loop
+ async def test_can_process_long_messages(self, message_handle_mock):
+ stomp = StompReader(None, self.loop)
+
+ await asyncio.sleep(0.001)
+
+ data = b'stent:true\ntimestamp:1548945234003\n\n111.11.11.111\x00\n' \
+ b'MESSAGE\n' \
+ b'content-length:14\nexpires:0\ndestination:/topic/' \
+ b'xxxxxxxxxxxxxxxxxxxxxxxxxl' \
+ b'\nsubscription:1\npriority:4\nActiveMQ.MQTT.QoS:1\nmessage-id' \
+ b':ID\\cxxxxxx-35207-1543430467768-204' \
+ b'\\c363\\c-1\\c1\\c463859\npersistent:true\ntimestamp' \
+ b':1548945234003\n\n222.222.22.222' \
+ b'\x00\nMESSAGE\ncontent-length:12\nexpires:0\ndestination:' \
+ b'/topic/xxxxxxxxxxxxxxxxxxxxxxxxxx' \
+ b'\nsubscription:1\npriority:4\nActiveMQ.MQTT.QoS:1\nmessage-id' \
+ b':ID\\cxxxxxx-35207-1543430467768-204' \
+ b'\\c363\\c-1\\c1\\c463860\npersistent:true\ntimestamp' \
+ b':1548945234005\n\n88.88.888.88' \
+ b'\x00\nMESSAGE\ncontent-length:11\nexpires:0\ndestination:' \
+ b'/topic/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' \
+ b'\nsubscription:1\npriority:4\nActiveMQ.MQTT.QoS:1\nmessage-id' \
+ b':ID\\cxxxxxx-35207-1543430467768-204'\
+ b'\\c362\\c-1\\c1\\c290793\npersistent:true\ntimestamp' \
+ b':1548945234005\n\n111.11.1.11' \
+ b'\x00\nMESSAGE\ncontent-length:14\nexpires:0\ndestination:' \
+ b'/topic/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' \
+ b'\nsubscription:1\npriority:4\nActiveMQ.MQTT.QoS:1\nmessage-id' \
+ b':ID\\cxxxxxx-35207-1543430467768-204' \
+ b'\\c362\\c-1\\c1\\c290794\npersistent:true\ntimestamp:' \
+ b'1548945234005\n\n222.222.22.222' \
+ b'\x00\nMESSAGE\ncontent-length:12\nexpires:0\ndestination:' \
+ b'/topic/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' \
+ b'\nsubscription:1\npriority:4\nActiveMQ.MQTT.QoS:1\nmessage-id' \
+ b':ID\\cxxxxxx-35207-1543430467768-204' \
+ b'\\c362\\c-1\\c1\\c290795\npersistent:true\ntimestamp:' \
+ b'1548945234005\n\n88.88.888.88\x00\nMESS'
+
+ stomp.data_received(data)
+
+ await asyncio.sleep(0.001)
+ self.assertEqual(message_handle_mock.call_count, 5)
+ self.assertEqual(stomp._protocol._pending_parts, [b'MESS'])
+
@patch('aiostomp.aiostomp.StompReader._handle_message')
@unittest_run_loop
async def test_consecutive_calls_data_received(self, message_handle_mock):
|
ValueError while parsing data
Since version 1.4.0 I constantly get the following error:
```
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/usr/lib/python3.6/asyncio/selector_events.py", line 732, in _read_ready
self._protocol.data_received(data)
File "/lib/python3.6/site-packages/aiostomp/aiostomp.py", line 388, in data_received
self._protocol.feed_data(data)
File "/lib/python3.6/site-packages/aiostomp/protocol.py", line 43, in feed_data
pending_data = self._feed_data(pending_data)
File "/lib/python3.6/site-packages/aiostomp/protocol.py", line 57, in _feed_data
b''.join(self._pending_parts))
File "/lib/python3.6/site-packages/aiostomp/protocol.py", line 75, in _parse_data
command, data = data.split(b'\n', 1)
ValueError: not enough values to unpack (expected 2, got 1)
```
An examples of `data` at this point are (when I print them out):
- `b'MESSAGE'`
- `b'ME'`
- `b'MESSAG'`
- `b'MES'`
I'm using Python 3.6.3 and connecting to an ActiveMQ Broker.
I'm facing this issue since 1.4.0.
1.3.0 works fine
Please let me know if I can do anything to help
|
0.0
|
3b9097a96aa84c5d78fdff15bea3a267761733a4
|
[
"tests/test_main.py::TestStompReader::test_can_process_long_messages"
] |
[
"tests/test_main.py::TestStompStats::test_can_increment_a_field",
"tests/test_main.py::TestStompStats::test_can_increment_a_missing_field",
"tests/test_main.py::TestStompStats::test_can_print_stats",
"tests/test_main.py::TestStompReader::test_accept_version_header",
"tests/test_main.py::TestStompReader::test_can_close_connection",
"tests/test_main.py::TestStompReader::test_can_close_connection_no_heartbeat",
"tests/test_main.py::TestStompReader::test_can_connect",
"tests/test_main.py::TestStompReader::test_can_connect_with_login_pass",
"tests/test_main.py::TestStompReader::test_can_connect_with_password",
"tests/test_main.py::TestStompReader::test_can_connect_with_username",
"tests/test_main.py::TestStompReader::test_can_handle_connected_frame_with_heartbeat",
"tests/test_main.py::TestStompReader::test_can_handle_connected_frame_with_heartbeat_disabled",
"tests/test_main.py::TestStompReader::test_can_handle_connected_frame_without_heartbeat",
"tests/test_main.py::TestStompReader::test_can_handle_error_frame",
"tests/test_main.py::TestStompReader::test_can_handle_exception",
"tests/test_main.py::TestStompReader::test_can_handle_message",
"tests/test_main.py::TestStompReader::test_can_handle_message_can_ack",
"tests/test_main.py::TestStompReader::test_can_handle_message_can_nack",
"tests/test_main.py::TestStompReader::test_can_handle_message_with_no_subscription",
"tests/test_main.py::TestStompReader::test_can_process_connected_frame",
"tests/test_main.py::TestStompReader::test_can_process_empty_message",
"tests/test_main.py::TestStompReader::test_can_process_error",
"tests/test_main.py::TestStompReader::test_can_process_exception",
"tests/test_main.py::TestStompReader::test_can_process_heartbeat",
"tests/test_main.py::TestStompReader::test_can_process_messages",
"tests/test_main.py::TestStompReader::test_can_receive_eof",
"tests/test_main.py::TestStompReader::test_can_send_frame",
"tests/test_main.py::TestStompReader::test_connection_can_be_lost",
"tests/test_main.py::TestStompReader::test_connection_can_be_lost_no_heartbeat",
"tests/test_main.py::TestStompReader::test_connection_can_be_made",
"tests/test_main.py::TestStompReader::test_consecutive_calls_data_received",
"tests/test_main.py::TestStompReader::test_send_frame_can_raise_error",
"tests/test_main.py::TestStompReader::test_transport_is_closed_connection_close",
"tests/test_main.py::TestAioStomp::test_aiostomp_supports_ssl",
"tests/test_main.py::TestAioStomp::test_can_close_connection",
"tests/test_main.py::TestAioStomp::test_can_connect_to_server",
"tests/test_main.py::TestAioStomp::test_can_get_subscription",
"tests/test_main.py::TestAioStomp::test_can_reconnect_on_connection_lost",
"tests/test_main.py::TestAioStomp::test_can_reconnect_to_server",
"tests/test_main.py::TestAioStomp::test_can_reconnect_to_server_with_max_attemps",
"tests/test_main.py::TestAioStomp::test_can_send_message_with_body_binary",
"tests/test_main.py::TestAioStomp::test_can_send_message_with_body_utf8",
"tests/test_main.py::TestAioStomp::test_can_send_message_with_body_without_content_lenght",
"tests/test_main.py::TestAioStomp::test_can_send_message_without_body",
"tests/test_main.py::TestAioStomp::test_can_subscribe",
"tests/test_main.py::TestAioStomp::test_can_subscribe_when_connected",
"tests/test_main.py::TestAioStomp::test_can_unsubscribe",
"tests/test_main.py::TestAioStomp::test_cannot_unsubscribe_when_not_subcribed",
"tests/test_main.py::TestAioStomp::test_no_reconnect_on_close",
"tests/test_main.py::TestAioStomp::test_reconnection",
"tests/test_main.py::TestAioStomp::test_reconnection_error",
"tests/test_main.py::TestAioStomp::test_subscribe_after_connection",
"tests/test_main.py::TestStompProtocol::test_can_close",
"tests/test_main.py::TestStompProtocol::test_can_create_a_connection",
"tests/test_main.py::TestStompProtocol::test_can_create_a_connection_with_ssl_context",
"tests/test_main.py::TestStompProtocol::test_can_send",
"tests/test_main.py::TestStompProtocol::test_can_subscribe",
"tests/test_main.py::TestStompProtocol::test_can_unsubscribe"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-02-11 13:49:21+00:00
|
mit
| 4,510 |
|
pennsignals__dsdk-37
|
diff --git a/src/dsdk/utils.py b/src/dsdk/utils.py
index a9d68af..9debc0d 100644
--- a/src/dsdk/utils.py
+++ b/src/dsdk/utils.py
@@ -6,6 +6,10 @@ from __future__ import annotations
import pickle
from collections import OrderedDict
from datetime import datetime
+from functools import wraps
+from logging import NullHandler, getLogger
+from time import sleep as default_sleep
+from typing import Callable, Sequence
from warnings import warn
from configargparse import ArgParser
@@ -29,6 +33,10 @@ except ImportError:
MongoClient = None
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+
def get_base_config() -> ArgParser:
"""Get the base configuration parser."""
config_parser = ArgParser(
@@ -124,3 +132,48 @@ class WriteOnceDict(OrderedDict):
if key in self:
raise KeyError("{} has already been set".format(key))
super(WriteOnceDict, self).__setitem__(key, value)
+
+
+def retry(
+ exceptions: Sequence[Exception],
+ retries: int = 5,
+ delay: float = 1.0,
+ backoff: float = 1.5,
+ sleep: Callable = default_sleep,
+):
+ """
+ Retry calling the decorated function using an exponential backoff.
+
+ Args:
+ exceptions: The exception to check. may be a tuple of
+ exceptions to check.
+ retries: Number of times to retry before giving up.
+ delay: Initial delay between retries in seconds.
+ backoff: Backoff multiplier (e.g. value of 2 will double the delay
+ each retry).
+ """
+ delay = float(delay)
+ backoff = float(backoff)
+
+ def wrapper(func):
+ @wraps(func)
+ def wrapped(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except exceptions as exception:
+ logger.exception(exception)
+ wait = delay
+ for _ in range(retries):
+ message = f"Retrying in {wait:.2f} seconds..."
+ logger.warning(message)
+ sleep(wait)
+ wait *= backoff
+ try:
+ return func(*args, **kwargs)
+ except exceptions as exception:
+ logger.exception(exception)
+ raise
+
+ return wrapped
+
+ return wrapper
|
pennsignals/dsdk
|
5cf83cfc0950ad49b25bd86e6525ea42b1cb0165
|
diff --git a/tests/test_dsdk.py b/tests/test_dsdk.py
index a8399e2..c3b79a6 100644
--- a/tests/test_dsdk.py
+++ b/tests/test_dsdk.py
@@ -6,6 +6,7 @@ from unittest.mock import Mock
import configargparse
from dsdk import BaseBatchJob, Block
+from dsdk.utils import retry
def test_batch(monkeypatch):
@@ -23,3 +24,67 @@ def test_batch(monkeypatch):
batch.run()
assert len(batch.evidence) == 1
assert batch.evidence["test"] == 42
+
+
+def test_retry_other_exception():
+ """Test retry other exception."""
+
+ exceptions_in = [
+ RuntimeError("what?"),
+ NotImplementedError("how?"),
+ RuntimeError("no!"),
+ ]
+ actual = []
+ expected = [1.0, 1.5, 2.25]
+
+ def sleep(wait: float):
+ actual.append(wait)
+
+ @retry(
+ (NotImplementedError, RuntimeError),
+ retries=4,
+ delay=1.0,
+ backoff=1.5,
+ sleep=sleep,
+ )
+ def explode():
+ raise exceptions_in.pop()
+
+ try:
+ explode()
+ raise AssertionError("IndexError expected")
+ except IndexError:
+ assert actual == expected
+
+
+def test_retry_exhausted():
+ """Test retry."""
+
+ exceptions_in = [
+ RuntimeError("what?"),
+ NotImplementedError("how?"),
+ RuntimeError("no!"),
+ NotImplementedError("when?"),
+ ]
+ actual = []
+ expected = [1.0, 1.5]
+
+ def sleep(wait: float):
+ actual.append(wait)
+
+ @retry(
+ (NotImplementedError, RuntimeError),
+ retries=2,
+ delay=1.0,
+ backoff=1.5,
+ sleep=sleep,
+ )
+ def explode():
+ raise exceptions_in.pop()
+
+ try:
+ explode()
+ raise AssertionError("NotImplementedError expected")
+ except NotImplementedError as exception:
+ assert actual == expected
+ assert str(exception) == "when?"
|
Add retry decorator
Pull the retry decorator up from cdi sepsis.
Fix:
- for loop inside exception handler after initial attempt.
- spurious delay after the final retry attempt.
- accumulated floating point errors in the delay.
Add:
+ default argument for `sleep=time.sleep` function to replace duration in test.
+ default argument for `warn=logger.warn` function to replace
stderr/stdout during test.
Logging appearing to come from `dsdk.util.retry` may not be useful. Passing the client module's `warn=logger.warn` during decoration allows the retry attempt to be logged in the client module's stderr or stdout stream.
|
0.0
|
5cf83cfc0950ad49b25bd86e6525ea42b1cb0165
|
[
"tests/test_dsdk.py::test_retry_other_exception",
"tests/test_dsdk.py::test_retry_exhausted"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-01-22 18:56:14+00:00
|
mit
| 4,511 |
|
peopledoc__septentrion-96
|
diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index 90ec480..00b31f6 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -116,7 +116,7 @@ data yet.
.. code-block:: console
- $ septentrion --target-version 1.0 show-migrations
+ $ septentrion show-migrations
Current version is None
Target version is 1.0
@@ -129,21 +129,16 @@ Great, we can now run it for real:
.. code-block:: console
- $ septentrion --target-version 1.0 migrate
+ $ septentrion migrate
Applying migrations
Version 1.0
-
.. note::
You should run *septentrion* in your root directory, where your ``migrations`` folder is.
-.. note::
-
- The ``--target-version`` flag is a required option (it might change in the future).
-
If something is not working as it should be, you probably want to check the
:ref:`troubleshooting guide <troubleshoot>`
@@ -214,7 +209,7 @@ We launch the migration.
.. code-block:: console
- $ septentrion --target-version 2.0 migrate
+ $ septentrion migrate
Applying migrations
Version 1.0
diff --git a/septentrion/cli.py b/septentrion/cli.py
index 8fed1d0..043510e 100644
--- a/septentrion/cli.py
+++ b/septentrion/cli.py
@@ -43,9 +43,11 @@ CONTEXT_SETTINGS = {
"max_content_width": 120,
}
+LATEST_VERSION = "latest"
+
def validate_version(ctx: click.Context, param: Any, value: str):
- if value is None:
+ if value == LATEST_VERSION:
return None
try:
version = versions.Version.from_string(value)
@@ -138,13 +140,14 @@ class CommaSeparatedMultipleString(StringParamType):
@click.option(
"--target-version",
help="Desired final version of the Database (env: SEPTENTRION_TARGET_VERSION)",
+ default=LATEST_VERSION,
callback=validate_version,
- required=True,
)
@click.option(
"--schema-version",
help="Version of the initial schema (if not specified, the most recent schema "
"will be used) (env: SEPTENTRION_SCHEMA_VERSION)",
+ default=LATEST_VERSION,
callback=validate_version,
)
@click.option(
diff --git a/septentrion/configuration.py b/septentrion/configuration.py
index 2c3d6d3..e7bd35c 100644
--- a/septentrion/configuration.py
+++ b/septentrion/configuration.py
@@ -40,6 +40,8 @@ DEFAULTS = {
"fixtures_template": "fixtures_{}.sql",
"non_transactional_keyword": ["CONCURRENTLY", "ALTER TYPE", "VACUUM"],
"ignore_symlinks": False,
+ "schema_version": None,
+ "target_version": None,
# Values that don't have an explicit default need to be present too
"verbosity": 0,
"host": None,
@@ -47,7 +49,6 @@ DEFAULTS = {
"username": None,
"password": False,
"dbname": None,
- "schema_version": None,
"additional_schema_file": [],
"before_schema_file": [],
"after_schema_file": [],
@@ -146,13 +147,17 @@ class Settings:
migrations_root = pathlib.Path(migrations_root)
return migrations_root
- def clean_schema_version(self, version: Union[str, versions.Version]):
+ def clean_schema_version(
+ self, version: Union[None, str, versions.Version]
+ ) -> Optional[versions.Version]:
if isinstance(version, str):
version = versions.Version.from_string(version)
return version
- def clean_target_version(self, version: Union[str, versions.Version]):
+ def clean_target_version(
+ self, version: Union[None, str, versions.Version]
+ ) -> Optional[versions.Version]:
if isinstance(version, str):
version = versions.Version.from_string(version)
diff --git a/septentrion/core.py b/septentrion/core.py
index 6da08c8..ec77f8a 100644
--- a/septentrion/core.py
+++ b/septentrion/core.py
@@ -39,7 +39,7 @@ def get_applied_versions(
# not force_version
def get_closest_version(
settings: configuration.Settings,
- target_version: versions.Version,
+ target_version: Optional[versions.Version],
sql_tpl: str,
existing_files: Iterable[str],
force_version: Optional[versions.Version] = None,
@@ -52,13 +52,16 @@ def get_closest_version(
known_versions = files.get_known_versions(settings=settings)
# find target version
- try:
- previous_versions = list(utils.until(known_versions, target_version))
- except ValueError:
- raise ValueError(
- "settings.TARGET_VERSION is improperly configured: "
- "version {} not found.".format(target_version)
- )
+ if not target_version:
+ previous_versions = known_versions
+ else:
+ try:
+ previous_versions = list(utils.until(known_versions, target_version))
+ except ValueError:
+ raise ValueError(
+ "settings.TARGET_VERSION is improperly configured: "
+ "version {} not found.".format(target_version)
+ )
# should we set a version from settings ?
if force_version:
@@ -107,7 +110,7 @@ def get_best_schema_version(settings: configuration.Settings) -> versions.Versio
def get_fixtures_version(
- settings: configuration.Settings, target_version: versions.Version
+ settings: configuration.Settings, target_version: Optional[versions.Version]
) -> versions.Version:
"""
Get the closest fixtures to use to init a new DB
@@ -129,7 +132,7 @@ def get_fixtures_version(
def build_migration_plan(
- settings: configuration.Settings, schema_version: versions.Version
+ settings: configuration.Settings, from_version: versions.Version
) -> Iterable[Dict[str, Any]]:
"""
Return the list of migrations by version,
@@ -137,18 +140,21 @@ def build_migration_plan(
"""
# get known versions
known_versions = files.get_known_versions(settings=settings)
+ target_version = settings.TARGET_VERSION
# get all versions to apply
- try:
- versions_to_apply = list(utils.until(known_versions, settings.TARGET_VERSION))
- except ValueError:
- raise ValueError(
- "settings.TARGET_VERSION is improperly configured: "
- "version {} not found.".format(settings.TARGET_VERSION)
- )
+ if not target_version:
+ versions_to_apply = known_versions
+ else:
+ try:
+ versions_to_apply = list(utils.until(known_versions, target_version))
+ except ValueError:
+ raise ValueError(
+ "settings.TARGET_VERSION is improperly configured: "
+ "version {} not found.".format(target_version)
+ )
- if schema_version:
- versions_to_apply = list(utils.since(versions_to_apply, schema_version))
+ versions_to_apply = list(utils.since(versions_to_apply, from_version))
# get plan for each version to apply
for version in versions_to_apply:
@@ -179,22 +185,26 @@ def describe_migration_plan(
settings: configuration.Settings, stylist: style.Stylist = style.noop_stylist
) -> None:
- schema_version = get_best_schema_version(settings=settings)
- with stylist.activate("title") as echo:
- echo("Schema file version is {}".format(schema_version))
+ if not db.is_schema_initialized(settings=settings):
+ from_version = get_best_schema_version(settings=settings)
+ with stylist.activate("title") as echo:
+ echo("Schema file version is {}".format(from_version))
+ else:
+ _from_version = db.get_current_schema_version(settings=settings)
+ assert _from_version # mypy shenanigans
+ from_version = _from_version
+ with stylist.activate("title") as echo:
+ echo("Current version is {}".format(from_version))
- with stylist.activate("subtitle") as echo:
- echo(" Migrations will start after {}".format(schema_version))
+ target_version = settings.TARGET_VERSION
- current_version = db.get_current_schema_version(settings=settings)
with stylist.activate("title") as echo:
- echo("Current version is {}".format(current_version))
+ echo("Migrations will start from {}".format(from_version))
- target_version = settings.TARGET_VERSION
with stylist.activate("title") as echo:
- echo("Target version is {}".format(target_version))
+ echo(f"Target version is {target_version or 'latest'}")
- for plan in build_migration_plan(settings=settings, schema_version=schema_version):
+ for plan in build_migration_plan(settings=settings, from_version=from_version):
version = plan["version"]
migrations = plan["plan"]
diff --git a/septentrion/files.py b/septentrion/files.py
index a9da1de..49c41ad 100644
--- a/septentrion/files.py
+++ b/septentrion/files.py
@@ -23,7 +23,7 @@ def iter_files(
yield f
-def get_known_versions(settings: configuration.Settings) -> Iterable[versions.Version]:
+def get_known_versions(settings: configuration.Settings) -> List[versions.Version]:
"""
Return the list of the known versions defined in migration repository,
ordered.
diff --git a/septentrion/migration.py b/septentrion/migration.py
index 268acdd..b3580b1 100644
--- a/septentrion/migration.py
+++ b/septentrion/migration.py
@@ -26,20 +26,22 @@ def migrate(
logger.info("Starting migrations")
- schema_version = core.get_best_schema_version(settings=settings)
-
if not db.is_schema_initialized(settings=settings):
logger.info("Migration table is empty, loading a schema")
# schema not inited
+ schema_version = core.get_best_schema_version(settings=settings)
init_schema(settings=settings, init_version=schema_version, stylist=stylist)
+ from_version = schema_version
+ else:
+ _from_version = db.get_current_schema_version(settings=settings)
+ assert _from_version # mypy shenanigans
+ from_version = _from_version
# play migrations
with stylist.activate("title") as echo:
echo("Applying migrations")
- for plan in core.build_migration_plan(
- settings=settings, schema_version=schema_version
- ):
+ for plan in core.build_migration_plan(settings=settings, from_version=from_version):
version = plan["version"]
logger.info("Processing version %s", version)
with stylist.activate("subtitle") as echo:
diff --git a/setup.cfg b/setup.cfg
index 37b5391..863c5dc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -100,7 +100,7 @@ testpaths =
tests/integration
tests/acceptance
-[mypy-setuptools.*,colorama.*,psycopg2.*,sqlparse.*,importlib_metadata.*]
+[mypy]
ignore_missing_imports = True
[coverage:report]
|
peopledoc/septentrion
|
6a40051e0178829c0d0111e1b9d585380b3a9ffb
|
diff --git a/tests/integration/test_migration.py b/tests/integration/test_migration.py
index 8928742..a7aec36 100644
--- a/tests/integration/test_migration.py
+++ b/tests/integration/test_migration.py
@@ -75,118 +75,3 @@ def test_init_schema_extra_files(mocker):
),
]
assert calls == patch.call_args_list
-
-
-def test_migrate(mocker):
- mock_run_script = mocker.patch("septentrion.migration.run_script")
- mocker.patch("septentrion.db.is_schema_initialized", return_value=False)
- mock_init_schema = mocker.patch("septentrion.migration.init_schema")
- mocker.patch("septentrion.db.write_migration")
-
- mocker.patch(
- "septentrion.migration.core.build_migration_plan",
- return_value=[
- {
- "version": versions.Version(
- version_tuple=(0, 1), original_string="0.1"
- ),
- "plan": [],
- },
- {
- "version": versions.Version(
- version_tuple=(1, 0), original_string="1.0"
- ),
- "plan": [
- (
- "1.0-0-version-dml.sql",
- False,
- "example_migrations/1.0/1.0-0-version-dml.sql",
- False,
- ),
- (
- "1.0-author-1-ddl.sql",
- False,
- "example_migrations/1.0/1.0-author-1-ddl.sql",
- False,
- ),
- (
- "1.0-author-2-dml.sql",
- False,
- "example_migrations/1.0/1.0-author-2-dml.sql",
- False,
- ),
- (
- "1.0-book-1-ddl.sql",
- False,
- "example_migrations/1.0/1.0-book-1-ddl.sql",
- False,
- ),
- (
- "1.0-book-2-dml.sql",
- False,
- "example_migrations/1.0/1.0-book-2-dml.sql",
- False,
- ),
- ],
- },
- {
- "version": versions.Version(
- version_tuple=(1, 1), original_string="1.1"
- ),
- "plan": [
- (
- "1.1-0-version-dml.sql",
- False,
- "example_migrations/1.1/1.1-0-version-dml.sql",
- False,
- ),
- (
- "1.1-add-num-pages-1-ddl.sql",
- False,
- "example_migrations/1.1/1.1-add-num-pages-1-ddl.sql",
- False,
- ),
- (
- "1.1-add-num-pages-2-dml.sql",
- False,
- "example_migrations/1.1/1.1-add-num-pages-2-dml.sql",
- False,
- ),
- (
- "1.1-index-ddl.sql",
- False,
- "example_migrations/1.1/1.1-index-ddl.sql",
- False,
- ),
- ],
- },
- ],
- )
-
- settings = configuration.Settings(
- host="",
- port="",
- username="",
- dbname="",
- migrations_root="example_migrations",
- target_version=versions.Version.from_string("1.1"),
- )
-
- migration.migrate(settings=settings)
-
- mock_init_schema.assert_called_once()
- assert mock_run_script.call_args_list == [
- call(path="example_migrations/1.0/1.0-0-version-dml.sql", settings=settings),
- call(path="example_migrations/1.0/1.0-author-1-ddl.sql", settings=settings),
- call(path="example_migrations/1.0/1.0-author-2-dml.sql", settings=settings),
- call(path="example_migrations/1.0/1.0-book-1-ddl.sql", settings=settings),
- call(path="example_migrations/1.0/1.0-book-2-dml.sql", settings=settings),
- call(path="example_migrations/1.1/1.1-0-version-dml.sql", settings=settings),
- call(
- path="example_migrations/1.1/1.1-add-num-pages-1-ddl.sql", settings=settings
- ),
- call(
- path="example_migrations/1.1/1.1-add-num-pages-2-dml.sql", settings=settings
- ),
- call(path="example_migrations/1.1/1.1-index-ddl.sql", settings=settings),
- ]
diff --git a/tests/unit/test_core.py b/tests/unit/test_core.py
index 9a159f0..c3d3af9 100644
--- a/tests/unit/test_core.py
+++ b/tests/unit/test_core.py
@@ -160,10 +160,10 @@ def test_build_migration_plan_unknown_version(known_versions):
settings = configuration.Settings(
target_version=versions.Version.from_string("1.5")
)
- schema_version = versions.Version.from_string("0")
+ from_version = versions.Version.from_string("0")
with pytest.raises(ValueError):
- list(core.build_migration_plan(settings, schema_version=schema_version))
+ list(core.build_migration_plan(settings, from_version=from_version))
def test_build_migration_plan_ok(mocker, known_versions):
@@ -183,8 +183,8 @@ def test_build_migration_plan_ok(mocker, known_versions):
settings = configuration.Settings(
target_version=versions.Version.from_string("1.2")
)
- schema_version = versions.Version.from_string("0")
- plan = core.build_migration_plan(settings=settings, schema_version=schema_version)
+ from_version = versions.Version.from_string("0")
+ plan = core.build_migration_plan(settings=settings, from_version=from_version)
expected = [
{
@@ -234,8 +234,8 @@ def test_build_migration_plan_db_uptodate(mocker, known_versions):
target_version=versions.Version.from_string("1.2"),
)
- schema_version = versions.Version.from_string("0")
- plan = core.build_migration_plan(settings=settings, schema_version=schema_version)
+ from_version = versions.Version.from_string("0")
+ plan = core.build_migration_plan(settings=settings, from_version=from_version)
expected = [
{"plan": [], "version": versions.Version.from_string("1.1")},
@@ -247,13 +247,25 @@ def test_build_migration_plan_db_uptodate(mocker, known_versions):
def test_build_migration_plan_with_schema(mocker, known_versions):
mocker.patch("septentrion.core.db.get_applied_migrations", return_value=[])
settings = configuration.Settings(target_version="1.2")
- schema_version = versions.Version.from_string("1.1")
+ from_version = versions.Version.from_string("1.1")
- plan = list(
- core.build_migration_plan(settings=settings, schema_version=schema_version)
- )
+ plan = list(core.build_migration_plan(settings=settings, from_version=from_version))
+
+ expected = [
+ {"plan": [], "version": versions.Version.from_string("1.2")},
+ ]
+ assert list(plan) == expected
+
+
+def test_build_migration_plan_with_no_target_version(mocker, known_versions):
+ mocker.patch("septentrion.core.db.get_applied_migrations", return_value=[])
+ settings = configuration.Settings(target_version=None)
+ from_version = versions.Version.from_string("1.1")
+
+ plan = list(core.build_migration_plan(settings=settings, from_version=from_version))
expected = [
{"plan": [], "version": versions.Version.from_string("1.2")},
+ {"plan": [], "version": versions.Version.from_string("1.3")},
]
assert list(plan) == expected
diff --git a/tests/unit/test_migration.py b/tests/unit/test_migration.py
new file mode 100644
index 0000000..f15694a
--- /dev/null
+++ b/tests/unit/test_migration.py
@@ -0,0 +1,56 @@
+from septentrion import configuration, migration
+
+
+def test_migrate_uses_correct_version_with_db(mocker):
+ mocker.patch("septentrion.db.is_schema_initialized", return_value=True)
+ mock_init_schema = mocker.patch("septentrion.migration.init_schema")
+ current_version = mocker.patch("septentrion.db.get_current_schema_version")
+
+ build_migration_plan = mocker.patch(
+ "septentrion.migration.core.build_migration_plan",
+ return_value=[],
+ )
+
+ settings = configuration.Settings(
+ host="",
+ port="",
+ username="",
+ dbname="",
+ migrations_root="example_migrations",
+ target_version=None,
+ )
+
+ migration.migrate(settings=settings)
+
+ mock_init_schema.assert_not_called()
+ build_migration_plan.assert_called_with(
+ settings=settings, from_version=current_version.return_value
+ )
+
+
+def test_migrate_uses_correct_version_without_db(mocker):
+ mocker.patch("septentrion.db.is_schema_initialized", return_value=False)
+ mock_init_schema = mocker.patch("septentrion.migration.init_schema")
+ mocker.patch("septentrion.db.get_current_schema_version")
+ schema_version = mocker.patch("septentrion.core.get_best_schema_version")
+
+ build_migration_plan = mocker.patch(
+ "septentrion.migration.core.build_migration_plan",
+ return_value=[],
+ )
+
+ settings = configuration.Settings(
+ host="",
+ port="",
+ username="",
+ dbname="",
+ migrations_root="example_migrations",
+ target_version=None,
+ )
+
+ migration.migrate(settings=settings)
+
+ mock_init_schema.assert_called_once()
+ build_migration_plan.assert_called_with(
+ settings=settings, from_version=schema_version.return_value
+ )
|
target-version should be optional
It should default to the latest version
|
0.0
|
6a40051e0178829c0d0111e1b9d585380b3a9ffb
|
[
"tests/unit/test_core.py::test_build_migration_plan_unknown_version",
"tests/unit/test_core.py::test_build_migration_plan_ok",
"tests/unit/test_core.py::test_build_migration_plan_db_uptodate",
"tests/unit/test_core.py::test_build_migration_plan_with_schema",
"tests/unit/test_core.py::test_build_migration_plan_with_no_target_version",
"tests/unit/test_migration.py::test_migrate_uses_correct_version_with_db",
"tests/unit/test_migration.py::test_migrate_uses_correct_version_without_db"
] |
[
"tests/integration/test_migration.py::test_init_schema",
"tests/integration/test_migration.py::test_init_schema_extra_files",
"tests/unit/test_core.py::test_get_applied_versions",
"tests/unit/test_core.py::test_get_closest_version_unknown_target_version",
"tests/unit/test_core.py::test_get_closest_version_ok",
"tests/unit/test_core.py::test_get_closest_version_schema_doesnt_exist",
"tests/unit/test_core.py::test_get_closest_version_earlier_schema",
"tests/unit/test_core.py::test_get_closest_version_schema_force_ko",
"tests/unit/test_core.py::test_get_closest_version_schema_force_ok",
"tests/unit/test_core.py::test_get_closest_version_schema_force_dont_exist",
"tests/unit/test_core.py::test_get_best_schema_version_ok",
"tests/unit/test_core.py::test_get_best_schema_version_ko"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-05-03 09:47:17+00:00
|
mit
| 4,512 |
|
peopledoc__vault-cli-111
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9a4a438..ee0f141 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ CHANGELOG
- Add implicit and explicit caching (#102 / #103)
- Add `config_file=` in lib mode
- Add a `--flat` option to the `get-all` command (the new output uses full paths as dictionnary key)
+- Add support of templates when values are dict
0.9.0 (2019-07-11)
------------------
diff --git a/vault_cli/client.py b/vault_cli/client.py
index d50508d..74f89d5 100644
--- a/vault_cli/client.py
+++ b/vault_cli/client.py
@@ -422,6 +422,8 @@ class VaultClientBase:
template_prefix = "!template!"
def _render_template_value(self, secret: types.JSONValue) -> types.JSONValue:
+ if isinstance(secret, dict):
+ return {k: self._render_template_value(v) for k, v in secret.items()}
if not isinstance(secret, str):
return secret
|
peopledoc/vault-cli
|
4875b0082411df2e204568165d6f32cb9c365156
|
diff --git a/tests/unit/test_client_base.py b/tests/unit/test_client_base.py
index d8376e5..1bfff71 100644
--- a/tests/unit/test_client_base.py
+++ b/tests/unit/test_client_base.py
@@ -366,6 +366,14 @@ def test_vault_client_base_render_template_path_not_found(vault):
({"a": {"value": "!template!b"}, "b": {"value": "c"}}, "b"),
# Secret is a template
({"a": {"value": "!template!{{ vault('b') }}"}, "b": {"value": "c"}}, "c"),
+ # Secret is a dict with containing a template
+ (
+ {
+ "a": {"value": {"x": "!template!{{ vault('b') }}", "y": "yay"}},
+ "b": {"value": "c"},
+ },
+ {"x": "c", "y": "yay"},
+ ),
# Finite recursion
(
{
|
add support for templates in dict
Currently the "template" feature of vault-cli only works when the value is a string (`{"value": "!template!..."}`
We would like to also support templates when values are dict. For example:
```
{
"value": {
"a": "!template!...",
"b": "!template!...",
"c": "value",
}
}
```
|
0.0
|
4875b0082411df2e204568165d6f32cb9c365156
|
[
"tests/unit/test_client_base.py::test_vault_client_base_get_secret[vault_contents4-expected4]"
] |
[
"tests/unit/test_client_base.py::test_get_client",
"tests/unit/test_client_base.py::test_get_client_class",
"tests/unit/test_client_base.py::test_vault_client_base_call_init_client",
"tests/unit/test_client_base.py::test_vault_client_base_authenticate[test_kwargs0-expected0]",
"tests/unit/test_client_base.py::test_vault_client_base_authenticate[test_kwargs1-expected1]",
"tests/unit/test_client_base.py::test_vault_client_base_authenticate[test_kwargs2-expected2]",
"tests/unit/test_client_base.py::test_vault_client_base_username_without_password",
"tests/unit/test_client_base.py::test_vault_client_base_login_cert_without_key",
"tests/unit/test_client_base.py::test_vault_client_base_no_auth",
"tests/unit/test_client_base.py::test_vault_client_ca_bundle_verify[True-yay-yay]",
"tests/unit/test_client_base.py::test_vault_client_ca_bundle_verify[True-None-True]",
"tests/unit/test_client_base.py::test_vault_client_ca_bundle_verify[False-yay-False]",
"tests/unit/test_client_base.py::test_vault_client_base_browse_recursive_secrets",
"tests/unit/test_client_base.py::test_vault_client_base_browse_recursive_secrets_single_secret",
"tests/unit/test_client_base.py::test_vault_client_base_get_all_secrets",
"tests/unit/test_client_base.py::test_vault_client_base_get_all_secrets_flat",
"tests/unit/test_client_base.py::test_vault_client_base_get_secrets[a-expected0]",
"tests/unit/test_client_base.py::test_vault_client_base_get_secrets[b-expected1]",
"tests/unit/test_client_base.py::test_vault_client_base_delete_all_secrets_generator",
"tests/unit/test_client_base.py::test_vault_client_base_delete_all_secrets_no_generator",
"tests/unit/test_client_base.py::test_vault_client_base_context_manager",
"tests/unit/test_client_base.py::test_vault_client_set_secret",
"tests/unit/test_client_base.py::test_vault_client_set_secret_overwrite_invalid[True-False]",
"tests/unit/test_client_base.py::test_vault_client_set_secret_overwrite_invalid[True-None]",
"tests/unit/test_client_base.py::test_vault_client_set_secret_overwrite_invalid[False-False]",
"tests/unit/test_client_base.py::test_vault_client_set_secret_overwrite_valid[True-True-c]",
"tests/unit/test_client_base.py::test_vault_client_set_secret_overwrite_valid[False-None-c]",
"tests/unit/test_client_base.py::test_vault_client_set_secret_overwrite_valid[True-None-d]",
"tests/unit/test_client_base.py::test_vault_client_set_secret_when_there_are_existing_secrets_beneath_path",
"tests/unit/test_client_base.py::test_vault_client_set_secret_when_a_parent_is_an_existing_secret",
"tests/unit/test_client_base.py::test_vault_client_set_secret_read_not_allowed",
"tests/unit/test_client_base.py::test_vault_client_set_secret_list_not_allowed",
"tests/unit/test_client_base.py::test_vault_client_set_secret_read_parent_not_allowed",
"tests/unit/test_client_base.py::test_vault_client_move_secrets",
"tests/unit/test_client_base.py::test_vault_client_move_secrets_generator",
"tests/unit/test_client_base.py::test_vault_client_move_secrets_overwrite_safe",
"tests/unit/test_client_base.py::test_vault_client_move_secrets_overwrite_force",
"tests/unit/test_client_base.py::test_vault_client_base_render_template",
"tests/unit/test_client_base.py::test_vault_client_base_render_template_path_not_found",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret[vault_contents0-b]",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret[vault_contents1-expected1]",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret[vault_contents2-b]",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret[vault_contents3-c]",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret[vault_contents5-d]",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret[vault_contents6-<recursive",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret[vault_contents7-<recursive",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret_template_root",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret_no_value",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret_with_dict",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret_not_found",
"tests/unit/test_client_base.py::test_vault_client_base_lookup_token",
"tests/unit/test_client_base.py::test_vault_client_base_get_secrets_error",
"tests/unit/test_client_base.py::test_vault_client_base_absolute_path[get_secret-params0-expected0]",
"tests/unit/test_client_base.py::test_vault_client_base_absolute_path[get_secret-params1-expected1]",
"tests/unit/test_client_base.py::test_vault_client_base_absolute_path[delete_secret-params2-expected2]",
"tests/unit/test_client_base.py::test_vault_client_base_absolute_path[delete_secret-params3-expected3]",
"tests/unit/test_client_base.py::test_vault_client_base_absolute_path[list_secrets-params4-expected4]",
"tests/unit/test_client_base.py::test_vault_client_base_absolute_path[list_secrets-params5-expected5]",
"tests/unit/test_client_base.py::test_vault_client_base_absolute_path[set_secret-params6-expected6]",
"tests/unit/test_client_base.py::test_vault_client_base_absolute_path[set_secret-params7-expected7]",
"tests/unit/test_client_base.py::test_vault_client_base_build_full_path[foo-/base/foo]",
"tests/unit/test_client_base.py::test_vault_client_base_build_full_path[/foo-/foo]",
"tests/unit/test_client_base.py::test_vault_client_base_base_path[foo-/foo/]",
"tests/unit/test_client_base.py::test_vault_client_base_base_path[foo/-/foo/]",
"tests/unit/test_client_base.py::test_vault_client_base_base_path[foo//-/foo/]",
"tests/unit/test_client_base.py::test_vault_client_base_base_path[/foo-/foo/]",
"tests/unit/test_client_base.py::test_vault_client_base_base_path[/foo/-/foo/]",
"tests/unit/test_client_base.py::test_vault_client_base_base_path[/foo//-/foo/]",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret_implicit_cache_ends",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret_implicit_cache_no_race_condition",
"tests/unit/test_client_base.py::test_vault_client_base_get_secrets_implicit_cache_no_race_condition",
"tests/unit/test_client_base.py::test_vault_client_base_get_secret_explicit_cache"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-13 08:51:55+00:00
|
apache-2.0
| 4,513 |
|
peopledoc__vault-cli-91
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 33edc55..3eaf296 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,11 @@
CHANGELOG
=========
+0.8.0 (unreleased)
+------------------
+
+- vault get defaults to text for string secrets, yaml for complex types (#87)
+
0.7.0 (2019-07-04)
------------------
diff --git a/README.md b/README.md
index 9656831..fb8bcf0 100644
--- a/README.md
+++ b/README.md
@@ -51,6 +51,7 @@ Options:
--config-file PATH Config file to use. Use 'no' to disable
config file. Default value: first of
./.vault.yml, ~/.vault.yml, /etc/vault.yml
+ -V, --version
-h, --help Show this message and exit.
Commands:
@@ -85,17 +86,17 @@ $ vault --url=https://vault.mydomain:8200 --certificate=/etc/vault/certificate.k
On the following examples, we'll be considering that we have a complete configuration file.
-### Read a secret (default is yaml format)
+### Read a secret in plain text (default)
```console
$ vault get my_secret
---- qwerty
-...
+qwerty
```
-### Read a secret in plain text
+### Read a secret in yaml format
```console
-$ vault get my_secret --text
-qwerty
+$ vault get --yaml my_secret
+--- qwerty
+...
```
### Write a secret
@@ -109,9 +110,9 @@ Done
$ export VAULT_CLI_BASE_PATH=myapp/
$ vault set /global_secret sharedsecret
Done
-$ vault get --text /global_secret
+$ vault get /global_secret
sharedsecret
-$ vault get --text global_secret
+$ vault get global_secret
Error: Secret not found
$ unset VAULT_CLI_BASE_PATH
```
@@ -127,7 +128,7 @@ $ vault set third_secret --stdin
<hit ctrl+d to end stdin>
Done
-vault get --text third_secret
+vault get third_secret
----BEGIN SECRET KEY----
...
```
@@ -144,7 +145,7 @@ Done
$ vault set -- -secret-name -oh-so-secret
Done
-$ vault get --text -- -secret-name
+$ vault get -- -secret-name
-oh-so-secret
```
@@ -159,6 +160,7 @@ Done
$ vault set list_secret secret1 secret2 secret3
Done
+# (For complex types, yaml format is selected)
$ vault get list_secret
---
- secret1
@@ -176,7 +178,7 @@ Error: Secret already exists at a. Use -f to force overwriting.
$ vault --safe-write set -f a c
Done
```
-(`safe-write` can be set in your configuration file, see below for details)
+(`safe-write` can be set in your configuration file, see details below)
### Get all values from the vault in a single command (yaml format)
```console
@@ -293,7 +295,7 @@ $ vault delete-all --force
```console
$ vault set password foo
$ vault set dsn '!template!proto://username:{{ vault("password") }}@host/'
-$ vault get --text dsn
+$ vault get dsn
proto://username:foo@host/
$ vault --no-render get --text dsn
!template!proto://username:{{ vault("password") }}@host/
diff --git a/vault_cli/cli.py b/vault_cli/cli.py
index 2caf22b..0a311fa 100644
--- a/vault_cli/cli.py
+++ b/vault_cli/cli.py
@@ -204,11 +204,11 @@ def get_all(client_obj: client.VaultClientBase, path: Sequence[str]):
@cli.command()
@click.pass_obj
@click.option(
- "--text",
- is_flag=True,
+ "--text/--yaml",
+ default=True,
help=(
- "--text implies --without-key. Returns the value in "
- "plain text format instead of yaml."
+ "Returns the value in yaml format instead of plain text."
+ "If the secret is not a string, it will always be yaml."
),
)
@click.argument("name")
@@ -218,7 +218,10 @@ def get(client_obj: client.VaultClientBase, text: bool, name: str):
Return a single secret value.
"""
secret = client_obj.get_secret(path=name)
- if text:
+ force_yaml = isinstance(secret, list) or isinstance(secret, dict)
+ if text and not force_yaml:
+ if secret is None:
+ secret = "null"
click.echo(secret)
return
|
peopledoc/vault-cli
|
3d9b7015383e9b97f4b4c617cd7d1ad34acc0ef8
|
diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py
index b8eaaa9..537da4b 100644
--- a/tests/integration/test_integration.py
+++ b/tests/integration/test_integration.py
@@ -26,6 +26,10 @@ def test_integration_cli(cli_runner, clean_vault):
assert call(cli_runner, ["get", "a", "--text"]).output == "b\n"
+ assert call(cli_runner, ["get", "a"]).output == "b\n"
+
+ assert call(cli_runner, ["get", "a", "--yaml"]).output == "--- b\n...\n"
+
# Both testing it and using it to clean the vault
call(cli_runner, ["delete-all", "--force"])
@@ -37,7 +41,7 @@ def test_integration_cli(cli_runner, clean_vault):
call(cli_runner, ["set", "c/d", "e"])
- assert call(cli_runner, ["get", "c/d"]).output == "--- e\n...\n"
+ assert call(cli_runner, ["get", "c/d"]).output == "e\n"
assert call(cli_runner, ["list"]).output == "a\nc/\n"
diff --git a/tests/unit/test_cli.py b/tests/unit/test_cli.py
index ce6f235..469758e 100644
--- a/tests/unit/test_cli.py
+++ b/tests/unit/test_cli.py
@@ -82,20 +82,34 @@ def test_list(cli_runner, vault_with_token):
assert result.exit_code == 0
-def test_get_text(cli_runner, vault_with_token):
[email protected]("extra_args", [["--text"], []])
+def test_get_text(cli_runner, vault_with_token, extra_args):
vault_with_token.db = {"a": "bar"}
- result = cli_runner.invoke(cli.cli, ["get", "a", "--text"])
+ result = cli_runner.invoke(cli.cli, ["get", "a"] + extra_args)
assert result.output == "bar\n"
assert result.exit_code == 0
[email protected](
+ "input, output",
+ [([1, 2], "---\n- 1\n- 2\n"), ({"a": "b"}, "---\na: b\n"), (None, "null\n")],
+)
+def test_get_text_special_cases(cli_runner, vault_with_token, input, output):
+
+ vault_with_token.db = {"a": input}
+ result = cli_runner.invoke(cli.cli, ["get", "a"])
+
+ assert result.output == output
+ assert result.exit_code == 0
+
+
def test_get_yaml(cli_runner, vault_with_token):
vault_with_token.db = {"a": "bar"}
- result = cli_runner.invoke(cli.cli, ["get", "a"])
+ result = cli_runner.invoke(cli.cli, ["get", "a", "--yaml"])
- assert yaml.safe_load(result.output) == "bar"
+ assert result.output == "--- bar\n...\n"
assert result.exit_code == 0
|
vault get should be --text by default, --yaml if explicitely requested
This would make thing so much simpler.
@pilou- do you think it would change something to the role you developed ? (we can discuss this in a private space if you want)
|
0.0
|
3d9b7015383e9b97f4b4c617cd7d1ad34acc0ef8
|
[
"tests/unit/test_cli.py::test_get_text[extra_args1]",
"tests/unit/test_cli.py::test_get_text_special_cases[None-null\\n]",
"tests/unit/test_cli.py::test_get_yaml"
] |
[
"tests/unit/test_cli.py::test_options",
"tests/unit/test_cli.py::test_list",
"tests/unit/test_cli.py::test_get_text[extra_args0]",
"tests/unit/test_cli.py::test_get_text_special_cases[input0----\\n-",
"tests/unit/test_cli.py::test_get_text_special_cases[input1----\\na:",
"tests/unit/test_cli.py::test_get_all",
"tests/unit/test_cli.py::test_set",
"tests/unit/test_cli.py::test_set_arg_stdin",
"tests/unit/test_cli.py::test_set_stdin",
"tests/unit/test_cli.py::test_set_stdin_yaml",
"tests/unit/test_cli.py::test_set_list",
"tests/unit/test_cli.py::test_set_yaml",
"tests/unit/test_cli.py::test_set_overwrite_valid[args0-b]",
"tests/unit/test_cli.py::test_set_overwrite_valid[args1-b]",
"tests/unit/test_cli.py::test_set_overwrite_valid[args2-b]",
"tests/unit/test_cli.py::test_set_overwrite_valid[args3-c]",
"tests/unit/test_cli.py::test_set_overwrite_safe_invalid[args0]",
"tests/unit/test_cli.py::test_set_overwrite_safe_invalid[args1]",
"tests/unit/test_cli.py::test_set_mix_secrets_folders",
"tests/unit/test_cli.py::test_set_mix_folders_secrets",
"tests/unit/test_cli.py::test_delete",
"tests/unit/test_cli.py::test_env",
"tests/unit/test_cli.py::test_env_prefix",
"tests/unit/test_cli.py::test_main",
"tests/unit/test_cli.py::test_load_config_no_config",
"tests/unit/test_cli.py::test_load_config[bla-expected0]",
"tests/unit/test_cli.py::test_load_config[None-expected1]",
"tests/unit/test_cli.py::test_extract_special_args[config0-environ0-expected0]",
"tests/unit/test_cli.py::test_extract_special_args[config1-environ1-expected1]",
"tests/unit/test_cli.py::test_extract_special_args[config2-environ2-expected2]",
"tests/unit/test_cli.py::test_extract_special_args[config3-environ3-expected3]",
"tests/unit/test_cli.py::test_extract_special_args[config4-environ4-expected4]",
"tests/unit/test_cli.py::test_extract_special_args[config5-environ5-expected5]",
"tests/unit/test_cli.py::test_extract_special_args[config6-environ6-expected6]",
"tests/unit/test_cli.py::test_extract_special_args[config7-environ7-expected7]",
"tests/unit/test_cli.py::test_set_verbosity",
"tests/unit/test_cli.py::test_dump_config",
"tests/unit/test_cli.py::test_delete_all",
"tests/unit/test_cli.py::test_delete_all_cancel",
"tests/unit/test_cli.py::test_delete_all_force",
"tests/unit/test_cli.py::test_mv",
"tests/unit/test_cli.py::test_mv_overwrite_safe",
"tests/unit/test_cli.py::test_mv_overwrite_force",
"tests/unit/test_cli.py::test_mv_mix_folders_secrets",
"tests/unit/test_cli.py::test_mv_mix_secrets_folders",
"tests/unit/test_cli.py::test_template",
"tests/unit/test_cli.py::test_lookup_token",
"tests/unit/test_cli.py::test_handle_errors",
"tests/unit/test_cli.py::test_version"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-05 08:46:51+00:00
|
apache-2.0
| 4,514 |
|
pepkit__peppy-446
|
diff --git a/docs/changelog.md b/docs/changelog.md
index 22ea48b..3ecc382 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -2,6 +2,16 @@
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format.
+## [0.35.6] -- 2023-06-27
+### Added
+- `orient` argument to `to_dict` method
+
+### Fixed
+- The name of the raw subsample object to match the actual name (list). Commit: #442
+
+### Changed
+- Reduced the number of items returned in the to_dict(extended=True) method to 3, with the name and description now stored in the config key.
+
## [0.35.5] -- 2023-03-27
### Fixed
- A [bug](https://github.com/pepkit/peppy/issues/435) with custom sample ids
diff --git a/peppy/_version.py b/peppy/_version.py
index 322d9f8..a51f1e9 100644
--- a/peppy/_version.py
+++ b/peppy/_version.py
@@ -1,1 +1,1 @@
-__version__ = "0.35.5"
+__version__ = "0.35.6"
diff --git a/peppy/const.py b/peppy/const.py
index bf6cc1b..0f009a5 100644
--- a/peppy/const.py
+++ b/peppy/const.py
@@ -119,6 +119,6 @@ OTHER_CONSTANTS = [
PEP_LATEST_VERSION = "2.1.0"
SAMPLE_RAW_DICT_KEY = "_sample_dict"
-SUBSAMPLE_RAW_DICT_KEY = "_subsample_dict"
+SUBSAMPLE_RAW_LIST_KEY = "_subsample_list"
__all__ = PROJECT_CONSTANTS + SAMPLE_CONSTANTS + OTHER_CONSTANTS
diff --git a/peppy/project.py b/peppy/project.py
index ee378e7..665d4b2 100644
--- a/peppy/project.py
+++ b/peppy/project.py
@@ -6,7 +6,7 @@ import os, sys
from collections.abc import Mapping
from contextlib import suppress
from logging import getLogger
-from typing import Dict, Iterable, List, Tuple, Union
+from typing import Dict, Iterable, List, Tuple, Union, Literal, NoReturn
import numpy as np
import pandas as pd
@@ -57,7 +57,7 @@ from .const import (
SAMPLE_TABLE_INDEX_KEY,
SUBSAMPLE_DF_KEY,
SUBSAMPLE_NAME_ATTR,
- SUBSAMPLE_RAW_DICT_KEY,
+ SUBSAMPLE_RAW_LIST_KEY,
SUBSAMPLE_TABLE_INDEX_KEY,
SUBSAMPLE_TABLES_FILE_KEY,
)
@@ -208,17 +208,19 @@ class Project(PathExAttMap):
self[SAMPLE_DF_KEY] = pd.DataFrame(pep_dictionary[SAMPLE_RAW_DICT_KEY])
self[CONFIG_KEY] = pep_dictionary[CONFIG_KEY]
- if SUBSAMPLE_RAW_DICT_KEY in pep_dictionary:
- if pep_dictionary[SUBSAMPLE_RAW_DICT_KEY]:
+ if SUBSAMPLE_RAW_LIST_KEY in pep_dictionary:
+ if pep_dictionary[SUBSAMPLE_RAW_LIST_KEY]:
self[SUBSAMPLE_DF_KEY] = [
pd.DataFrame(sub_a)
- for sub_a in pep_dictionary[SUBSAMPLE_RAW_DICT_KEY]
+ for sub_a in pep_dictionary[SUBSAMPLE_RAW_LIST_KEY]
]
- if NAME_KEY in pep_dictionary:
- self[NAME_KEY] = pep_dictionary[NAME_KEY]
+ if NAME_KEY in self[CONFIG_KEY]:
+ self[NAME_KEY] = self[CONFIG_KEY][NAME_KEY]
- if DESC_KEY in pep_dictionary:
- self[DESC_KEY] = pep_dictionary[DESC_KEY]
+ if DESC_KEY in self[CONFIG_KEY]:
+ self[DESC_KEY] = self[CONFIG_KEY][DESC_KEY]
+
+ self._set_indexes(self[CONFIG_KEY])
self.create_samples(modify=False if self[SAMPLE_TABLE_FILE_KEY] else True)
self._sample_table = self._get_table_from_samples(
@@ -227,25 +229,35 @@ class Project(PathExAttMap):
return self
- def to_dict(self, expand: bool = False, extended: bool = False) -> dict:
+ def to_dict(
+ self,
+ expand: bool = False,
+ extended: bool = False,
+ orient: Literal[
+ "dict", "list", "series", "split", "tight", "records", "index"
+ ] = "dict",
+ ) -> dict:
"""
Convert the Project object to a dictionary.
:param bool expand: whether to expand the paths
:param bool extended: whether to produce complete project dict (used to reinit the project)
+ :param Literal orient: orientation of the returned df
:return dict: a dictionary representation of the Project object
"""
if extended:
if self[SUBSAMPLE_DF_KEY] is not None:
- sub_df = [sub_a.to_dict() for sub_a in self[SUBSAMPLE_DF_KEY]]
+ sub_df = [
+ sub_a.to_dict(orient=orient) for sub_a in self[SUBSAMPLE_DF_KEY]
+ ]
else:
sub_df = None
+ self[CONFIG_KEY][NAME_KEY] = self[NAME_KEY]
+ self[CONFIG_KEY][DESC_KEY] = self[DESC_KEY]
p_dict = {
- SAMPLE_RAW_DICT_KEY: self[SAMPLE_DF_KEY].to_dict(),
- CONFIG_KEY: dict(self[CONFIG_KEY]),
- SUBSAMPLE_RAW_DICT_KEY: sub_df,
- NAME_KEY: self[NAME_KEY],
- DESC_KEY: self[DESC_KEY],
+ SAMPLE_RAW_DICT_KEY: self[SAMPLE_DF_KEY].to_dict(orient=orient),
+ CONFIG_KEY: self[CONFIG_KEY].to_dict(expand=expand),
+ SUBSAMPLE_RAW_LIST_KEY: sub_df,
}
else:
p_dict = self.config.to_dict(expand=expand)
@@ -258,6 +270,9 @@ class Project(PathExAttMap):
Populate Project with Sample objects
"""
self._samples: List[Sample] = self.load_samples()
+ if self.samples is None:
+ _LOGGER.info("No samples found in the project.")
+
if modify:
self.modify_samples()
else:
@@ -326,14 +341,7 @@ class Project(PathExAttMap):
_LOGGER.debug("Raw ({}) config data: {}".format(cfg_path, config))
- self.st_index = (
- config[SAMPLE_TABLE_INDEX_KEY] if SAMPLE_TABLE_INDEX_KEY in config else None
- )
- self.sst_index = (
- config[SUBSAMPLE_TABLE_INDEX_KEY]
- if SUBSAMPLE_TABLE_INDEX_KEY in config
- else None
- )
+ self._set_indexes(config)
# recursively import configs
if (
PROJ_MODS_KEY in config
@@ -388,6 +396,23 @@ class Project(PathExAttMap):
relative_vars = [CFG_SAMPLE_TABLE_KEY, CFG_SUBSAMPLE_TABLE_KEY]
_make_sections_absolute(self[CONFIG_KEY], relative_vars, cfg_path)
+ def _set_indexes(self, config: Mapping) -> NoReturn:
+ """
+ Set sample and subsample indexes if they are different then Default
+
+ :param config: project config
+ """
+ self.st_index = (
+ config[SAMPLE_TABLE_INDEX_KEY]
+ if SAMPLE_TABLE_INDEX_KEY in config
+ else SAMPLE_NAME_ATTR
+ )
+ self.sst_index = (
+ config[SUBSAMPLE_TABLE_INDEX_KEY]
+ if SUBSAMPLE_TABLE_INDEX_KEY in config
+ else SUBSAMPLE_NAME_ATTR
+ )
+
def load_samples(self):
"""
Read the sample_table and subsample_tables into dataframes
|
pepkit/peppy
|
efec4b99704d796d615311115a4ff2178b35aff3
|
diff --git a/tests/test_Project.py b/tests/test_Project.py
index b585e04..751c4ef 100644
--- a/tests/test_Project.py
+++ b/tests/test_Project.py
@@ -553,13 +553,18 @@ class TestPostInitSampleCreation:
p2 = Project(cfg=example_pep_csv_path)
assert not p1 == p2
- @pytest.mark.parametrize("example_pep_cfg_path", ["append"], indirect=True)
- def test_from_dict(self, example_pep_cfg_path):
+ @pytest.mark.parametrize(
+ "example_pep_cfg_path",
+ ["append", "custom_index", "imply", "subtables"],
+ indirect=True,
+ )
+ @pytest.mark.parametrize("orient", ["dict", "records"])
+ def test_from_dict(self, example_pep_cfg_path, orient):
"""
Test initializing project from dict
"""
p1 = Project(cfg=example_pep_cfg_path)
- p1_dict = p1.to_dict(extended=True)
+ p1_dict = p1.to_dict(extended=True, orient=orient)
del p1_dict["_config"]["sample_table"]
p2 = Project().from_dict(p1_dict)
assert p1 == p2
|
If 'annotation_sheet.csv' is without samples, throws 'NoneType' error
I decided to remove the samples in the annotation_sheet.csv to see what would happen. Error was thrown - `TypeError: 'NoneType' object is not iterable.` It would be nice to have it respond with `No samples found in your file` or something.
|
0.0
|
efec4b99704d796d615311115a4ff2178b35aff3
|
[
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[dict-append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[dict-custom_index]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[dict-imply]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[dict-subtables]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[records-append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[records-custom_index]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[records-imply]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[records-subtables]"
] |
[
"tests/test_Project.py::TestProjectConstructor::test_empty",
"tests/test_Project.py::TestProjectConstructor::test_nonexistent",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[basic-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[basic-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[derive-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[derive-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[imply-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[imply-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[append-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[append-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[amendments1-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[amendments1-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[amendments2-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[amendments2-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[derive_imply-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[derive_imply-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[duplicate-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[duplicate-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[imports-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[imports-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable1-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable1-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable2-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable2-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable3-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable3-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable4-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable4-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable5-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable5-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[remove-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[remove-True]",
"tests/test_Project.py::TestProjectConstructor::test_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_basic/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_derive/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imply/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imports/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote_simulate_no_network[https://raw.githubusercontent.com/pepkit/example_peps/master/example_basic/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote_simulate_no_network[https://raw.githubusercontent.com/pepkit/example_peps/master/example_derive/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote_simulate_no_network[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imply/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote_simulate_no_network[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imports/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_csv_init_autodetect[basic]",
"tests/test_Project.py::TestProjectConstructor::test_csv_init_autodetect[imply]",
"tests/test_Project.py::TestProjectConstructor::test_remote_csv_init_autodetect[https://raw.githubusercontent.com/pepkit/example_peps/master/example_basic/sample_table.csv]",
"tests/test_Project.py::TestProjectConstructor::test_remote_csv_init_autodetect[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imply/sample_table.csv]",
"tests/test_Project.py::TestProjectConstructor::test_automerge[automerge]",
"tests/test_Project.py::TestProjectConstructor::test_automerge_csv[automerge]",
"tests/test_Project.py::TestProjectConstructor::test_automerge_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_automerge/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_automerge_disallowed_with_subsamples[subtable_automerge]",
"tests/test_Project.py::TestProjectConstructor::test_amendments[amendments1-False]",
"tests/test_Project.py::TestProjectConstructor::test_amendments[amendments1-True]",
"tests/test_Project.py::TestProjectConstructor::test_subsample_table_works_when_no_sample_mods[subtable1]",
"tests/test_Project.py::TestProjectConstructor::test_cutsom_sample_table_index_config[custom_index]",
"tests/test_Project.py::TestProjectConstructor::test_cutsom_sample_table_index_constructor[custom_index]",
"tests/test_Project.py::TestProjectConstructor::test_subsample_table_multiple[subtables]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[basic-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[basic-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[derive-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[derive-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[imply-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[imply-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[append-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[append-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[amendments1-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[amendments1-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[amendments2-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[amendments2-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[derive_imply-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[derive_imply-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[duplicate-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[duplicate-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[imports-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[imports-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable1-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable1-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable2-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable2-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable3-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable3-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable4-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable4-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable5-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable5-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[remove-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[remove-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[append-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_missing_sample_name_derive[project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_missing_sample_name[project_config_noname.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_missing_sample_name_defer[project_config_noname.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_missing_sample_name_custom_index[project_config_noname.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_equality[basic]",
"tests/test_Project.py::TestProjectConstructor::test_inequality[example_peps_cfg_paths0]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[basic]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[derive]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[imply]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[append]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[amendments1]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[amendments2]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[derive_imply]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[duplicate]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[imports]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable1]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable2]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable3]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable4]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable5]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[remove]",
"tests/test_Project.py::TestProjectConstructor::test_to_dict_does_not_create_nans",
"tests/test_Project.py::TestProjectConstructor::test_missing_version[missing_version]",
"tests/test_Project.py::TestProjectConstructor::test_sample_table_version[basic]",
"tests/test_Project.py::TestProjectConstructor::test_auto_merge_duplicated_names_works_for_different_read_types[nextflow_samplesheet]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-sample]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-instrument_platform]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-run_accession]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-fastq_1]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-fastq_2]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-fasta]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_activation_interactive[amendments1]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_deactivation_interactive[amendments1]",
"tests/test_Project.py::TestProjectManipulationTests::test_missing_amendment_raises_correct_exception[amendments1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_missing_amendment_raises_correct_exception[amendments1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_argument_cant_be_null[amendments1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_argument_cant_be_null[amendments1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[basic-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[basic-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[derive-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[derive-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[imply-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[imply-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[append-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[append-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[amendments1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[amendments1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[amendments2-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[amendments2-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[derive_imply-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[derive_imply-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[duplicate-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[duplicate-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[imports-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[imports-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable2-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable2-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable3-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable3-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable4-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable4-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable5-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable5-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[remove-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[remove-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_listing[amendments1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_listing[amendments1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_sample_updates_regenerate_df[basic]",
"tests/test_Project.py::TestProjectManipulationTests::test_subsample_table_property[subtable1]",
"tests/test_Project.py::TestProjectManipulationTests::test_get_sample[basic]",
"tests/test_Project.py::TestProjectManipulationTests::test_get_sample_nonexistent[basic]",
"tests/test_Project.py::TestPostInitSampleCreation::test_append[append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_imports[imports]",
"tests/test_Project.py::TestPostInitSampleCreation::test_imply[imply]",
"tests/test_Project.py::TestPostInitSampleCreation::test_duplicate[duplicate]",
"tests/test_Project.py::TestPostInitSampleCreation::test_derive[derive]",
"tests/test_Project.py::TestPostInitSampleCreation::test_equality[append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_unequality[derive-append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_pandas[append-append]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-22 21:21:25+00:00
|
bsd-2-clause
| 4,515 |
|
pepkit__peppy-450
|
diff --git a/docs/changelog.md b/docs/changelog.md
index 22ea48b..3ecc382 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -2,6 +2,16 @@
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format.
+## [0.35.6] -- 2023-06-27
+### Added
+- `orient` argument to `to_dict` method
+
+### Fixed
+- The name of the raw subsample object to match the actual name (list). Commit: #442
+
+### Changed
+- Reduced the number of items returned in the to_dict(extended=True) method to 3, with the name and description now stored in the config key.
+
## [0.35.5] -- 2023-03-27
### Fixed
- A [bug](https://github.com/pepkit/peppy/issues/435) with custom sample ids
diff --git a/peppy/_version.py b/peppy/_version.py
index 322d9f8..a51f1e9 100644
--- a/peppy/_version.py
+++ b/peppy/_version.py
@@ -1,1 +1,1 @@
-__version__ = "0.35.5"
+__version__ = "0.35.6"
diff --git a/peppy/const.py b/peppy/const.py
index bf6cc1b..0f009a5 100644
--- a/peppy/const.py
+++ b/peppy/const.py
@@ -119,6 +119,6 @@ OTHER_CONSTANTS = [
PEP_LATEST_VERSION = "2.1.0"
SAMPLE_RAW_DICT_KEY = "_sample_dict"
-SUBSAMPLE_RAW_DICT_KEY = "_subsample_dict"
+SUBSAMPLE_RAW_LIST_KEY = "_subsample_list"
__all__ = PROJECT_CONSTANTS + SAMPLE_CONSTANTS + OTHER_CONSTANTS
diff --git a/peppy/project.py b/peppy/project.py
index d16d470..9e21364 100644
--- a/peppy/project.py
+++ b/peppy/project.py
@@ -6,7 +6,7 @@ import os, sys
from collections.abc import Mapping
from contextlib import suppress
from logging import getLogger
-from typing import Dict, Iterable, List, Tuple, Union
+from typing import Dict, Iterable, List, Tuple, Union, Literal
import numpy as np
import pandas as pd
@@ -57,7 +57,7 @@ from .const import (
SAMPLE_TABLE_INDEX_KEY,
SUBSAMPLE_DF_KEY,
SUBSAMPLE_NAME_ATTR,
- SUBSAMPLE_RAW_DICT_KEY,
+ SUBSAMPLE_RAW_LIST_KEY,
SUBSAMPLE_TABLE_INDEX_KEY,
SUBSAMPLE_TABLES_FILE_KEY,
)
@@ -208,17 +208,17 @@ class Project(PathExAttMap):
self[SAMPLE_DF_KEY] = pd.DataFrame(pep_dictionary[SAMPLE_RAW_DICT_KEY])
self[CONFIG_KEY] = pep_dictionary[CONFIG_KEY]
- if SUBSAMPLE_RAW_DICT_KEY in pep_dictionary:
- if pep_dictionary[SUBSAMPLE_RAW_DICT_KEY]:
+ if SUBSAMPLE_RAW_LIST_KEY in pep_dictionary:
+ if pep_dictionary[SUBSAMPLE_RAW_LIST_KEY]:
self[SUBSAMPLE_DF_KEY] = [
pd.DataFrame(sub_a)
- for sub_a in pep_dictionary[SUBSAMPLE_RAW_DICT_KEY]
+ for sub_a in pep_dictionary[SUBSAMPLE_RAW_LIST_KEY]
]
- if NAME_KEY in pep_dictionary:
- self[NAME_KEY] = pep_dictionary[NAME_KEY]
+ if NAME_KEY in self[CONFIG_KEY]:
+ self[NAME_KEY] = self[CONFIG_KEY][NAME_KEY]
- if DESC_KEY in pep_dictionary:
- self[DESC_KEY] = pep_dictionary[DESC_KEY]
+ if DESC_KEY in self[CONFIG_KEY]:
+ self[DESC_KEY] = self[CONFIG_KEY][DESC_KEY]
self.create_samples(modify=False if self[SAMPLE_TABLE_FILE_KEY] else True)
self._sample_table = self._get_table_from_samples(
@@ -227,25 +227,35 @@ class Project(PathExAttMap):
return self
- def to_dict(self, expand: bool = False, extended: bool = False) -> dict:
+ def to_dict(
+ self,
+ expand: bool = False,
+ extended: bool = False,
+ orient: Literal[
+ "dict", "list", "series", "split", "tight", "records", "index"
+ ] = "dict",
+ ) -> dict:
"""
Convert the Project object to a dictionary.
:param bool expand: whether to expand the paths
:param bool extended: whether to produce complete project dict (used to reinit the project)
+ :param Literal orient: orientation of the returned df
:return dict: a dictionary representation of the Project object
"""
if extended:
if self[SUBSAMPLE_DF_KEY] is not None:
- sub_df = [sub_a.to_dict() for sub_a in self[SUBSAMPLE_DF_KEY]]
+ sub_df = [
+ sub_a.to_dict(orient=orient) for sub_a in self[SUBSAMPLE_DF_KEY]
+ ]
else:
sub_df = None
+ self[CONFIG_KEY][NAME_KEY] = self[NAME_KEY]
+ self[CONFIG_KEY][DESC_KEY] = self[DESC_KEY]
p_dict = {
- SAMPLE_RAW_DICT_KEY: self[SAMPLE_DF_KEY].to_dict(),
- CONFIG_KEY: self[CONFIG_KEY],
- SUBSAMPLE_RAW_DICT_KEY: sub_df,
- NAME_KEY: self[NAME_KEY],
- DESC_KEY: self[DESC_KEY],
+ SAMPLE_RAW_DICT_KEY: self[SAMPLE_DF_KEY].to_dict(orient=orient),
+ CONFIG_KEY: self[CONFIG_KEY].to_dict(expand=expand),
+ SUBSAMPLE_RAW_LIST_KEY: sub_df,
}
else:
p_dict = self.config.to_dict(expand=expand)
|
pepkit/peppy
|
8449635bb35e16748e693bdd2c4820502d7b448d
|
diff --git a/tests/test_Project.py b/tests/test_Project.py
index b585e04..85a1c48 100644
--- a/tests/test_Project.py
+++ b/tests/test_Project.py
@@ -553,13 +553,16 @@ class TestPostInitSampleCreation:
p2 = Project(cfg=example_pep_csv_path)
assert not p1 == p2
- @pytest.mark.parametrize("example_pep_cfg_path", ["append"], indirect=True)
- def test_from_dict(self, example_pep_cfg_path):
+ @pytest.mark.parametrize(
+ "example_pep_cfg_path", ["append", "subtable2"], indirect=True
+ )
+ @pytest.mark.parametrize("orient", ["dict", "records"])
+ def test_from_dict(self, example_pep_cfg_path, orient):
"""
Test initializing project from dict
"""
p1 = Project(cfg=example_pep_cfg_path)
- p1_dict = p1.to_dict(extended=True)
+ p1_dict = p1.to_dict(extended=True, orient=orient)
del p1_dict["_config"]["sample_table"]
p2 = Project().from_dict(p1_dict)
assert p1 == p2
|
Add orient argument to `to_dict` method
It would be niece to add `orient` argument, that sets orientation of the output structure of the samples and subsamples
https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html
Related issue: https://github.com/pepkit/pephub/issues/186
|
0.0
|
8449635bb35e16748e693bdd2c4820502d7b448d
|
[
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[dict-append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[dict-subtable2]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[records-append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_dict[records-subtable2]"
] |
[
"tests/test_Project.py::TestProjectConstructor::test_empty",
"tests/test_Project.py::TestProjectConstructor::test_nonexistent",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[basic-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[basic-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[derive-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[derive-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[imply-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[imply-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[append-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[append-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[amendments1-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[amendments1-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[amendments2-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[amendments2-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[derive_imply-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[derive_imply-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[duplicate-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[duplicate-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[imports-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[imports-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable1-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable1-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable2-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable2-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable3-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable3-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable4-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable4-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable5-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[subtable5-True]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[remove-False]",
"tests/test_Project.py::TestProjectConstructor::test_instantiaion[remove-True]",
"tests/test_Project.py::TestProjectConstructor::test_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_basic/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_derive/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imply/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imports/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote_simulate_no_network[https://raw.githubusercontent.com/pepkit/example_peps/master/example_basic/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote_simulate_no_network[https://raw.githubusercontent.com/pepkit/example_peps/master/example_derive/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote_simulate_no_network[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imply/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_remote_simulate_no_network[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imports/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_csv_init_autodetect[basic]",
"tests/test_Project.py::TestProjectConstructor::test_csv_init_autodetect[imply]",
"tests/test_Project.py::TestProjectConstructor::test_remote_csv_init_autodetect[https://raw.githubusercontent.com/pepkit/example_peps/master/example_basic/sample_table.csv]",
"tests/test_Project.py::TestProjectConstructor::test_remote_csv_init_autodetect[https://raw.githubusercontent.com/pepkit/example_peps/master/example_imply/sample_table.csv]",
"tests/test_Project.py::TestProjectConstructor::test_automerge[automerge]",
"tests/test_Project.py::TestProjectConstructor::test_automerge_csv[automerge]",
"tests/test_Project.py::TestProjectConstructor::test_automerge_remote[https://raw.githubusercontent.com/pepkit/example_peps/master/example_automerge/project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_automerge_disallowed_with_subsamples[subtable_automerge]",
"tests/test_Project.py::TestProjectConstructor::test_amendments[amendments1-False]",
"tests/test_Project.py::TestProjectConstructor::test_amendments[amendments1-True]",
"tests/test_Project.py::TestProjectConstructor::test_subsample_table_works_when_no_sample_mods[subtable1]",
"tests/test_Project.py::TestProjectConstructor::test_cutsom_sample_table_index_config[custom_index]",
"tests/test_Project.py::TestProjectConstructor::test_cutsom_sample_table_index_constructor[custom_index]",
"tests/test_Project.py::TestProjectConstructor::test_subsample_table_multiple[subtables]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[basic-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[basic-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[derive-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[derive-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[imply-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[imply-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[append-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[append-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[amendments1-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[amendments1-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[amendments2-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[amendments2-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[derive_imply-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[derive_imply-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[duplicate-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[duplicate-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[imports-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[imports-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable1-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable1-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable2-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable2-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable3-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable3-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable4-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable4-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable5-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[subtable5-True]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[remove-False]",
"tests/test_Project.py::TestProjectConstructor::test_no_description[remove-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[basic-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imply-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[append-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[append-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments1-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[amendments2-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[derive_imply-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[duplicate-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[imports-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable1-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable2-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable3-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable4-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[subtable5-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-desc1-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-desc1-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-desc",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-11-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-11-True]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-None-False]",
"tests/test_Project.py::TestProjectConstructor::test_description[remove-None-True]",
"tests/test_Project.py::TestProjectConstructor::test_missing_sample_name_derive[project_config.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_missing_sample_name[project_config_noname.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_missing_sample_name_defer[project_config_noname.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_missing_sample_name_custom_index[project_config_noname.yaml]",
"tests/test_Project.py::TestProjectConstructor::test_equality[basic]",
"tests/test_Project.py::TestProjectConstructor::test_inequality[example_peps_cfg_paths0]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[basic]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[derive]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[imply]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[append]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[amendments1]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[amendments2]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[derive_imply]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[duplicate]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[imports]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable1]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable2]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable3]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable4]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[subtable5]",
"tests/test_Project.py::TestProjectConstructor::test_from_dict_instatiation[remove]",
"tests/test_Project.py::TestProjectConstructor::test_to_dict_does_not_create_nans",
"tests/test_Project.py::TestProjectConstructor::test_missing_version[missing_version]",
"tests/test_Project.py::TestProjectConstructor::test_sample_table_version[basic]",
"tests/test_Project.py::TestProjectConstructor::test_auto_merge_duplicated_names_works_for_different_read_types[nextflow_samplesheet]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-sample]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-instrument_platform]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-run_accession]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-fastq_1]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-fastq_2]",
"tests/test_Project.py::TestProjectConstructor::test_peppy_initializes_samples_with_correct_attributes[nextflow_config-fasta]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_activation_interactive[amendments1]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_deactivation_interactive[amendments1]",
"tests/test_Project.py::TestProjectManipulationTests::test_missing_amendment_raises_correct_exception[amendments1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_missing_amendment_raises_correct_exception[amendments1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_argument_cant_be_null[amendments1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_argument_cant_be_null[amendments1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[basic-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[basic-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[derive-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[derive-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[imply-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[imply-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[append-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[append-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[amendments1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[amendments1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[amendments2-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[amendments2-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[derive_imply-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[derive_imply-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[duplicate-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[duplicate-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[imports-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[imports-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable2-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable2-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable3-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable3-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable4-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable4-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable5-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[subtable5-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[remove-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_str_repr_correctness[remove-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_listing[amendments1-False]",
"tests/test_Project.py::TestProjectManipulationTests::test_amendments_listing[amendments1-True]",
"tests/test_Project.py::TestProjectManipulationTests::test_sample_updates_regenerate_df[basic]",
"tests/test_Project.py::TestProjectManipulationTests::test_subsample_table_property[subtable1]",
"tests/test_Project.py::TestProjectManipulationTests::test_get_sample[basic]",
"tests/test_Project.py::TestProjectManipulationTests::test_get_sample_nonexistent[basic]",
"tests/test_Project.py::TestPostInitSampleCreation::test_append[append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_imports[imports]",
"tests/test_Project.py::TestPostInitSampleCreation::test_imply[imply]",
"tests/test_Project.py::TestPostInitSampleCreation::test_duplicate[duplicate]",
"tests/test_Project.py::TestPostInitSampleCreation::test_derive[derive]",
"tests/test_Project.py::TestPostInitSampleCreation::test_equality[append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_unequality[derive-append]",
"tests/test_Project.py::TestPostInitSampleCreation::test_from_pandas[append-append]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-27 19:36:13+00:00
|
bsd-2-clause
| 4,516 |
|
perrygeo__python-rasterstats-287
|
diff --git a/pyproject.toml b/pyproject.toml
index 3520992..f5bfa65 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,10 +28,10 @@ classifiers = [
]
requires-python = ">=3.7"
dependencies = [
- "affine <3.0",
+ "affine",
"click >7.1",
"cligj >=0.4",
- "fiona <1.9",
+ "fiona",
"numpy >=1.9",
"rasterio >=1.0",
"simplejson",
@@ -42,6 +42,7 @@ dependencies = [
[project.optional-dependencies]
test = [
"coverage",
+ "geopandas",
"pyshp >=1.1.4",
"pytest >=4.6",
"pytest-cov >=2.2.0",
diff --git a/src/rasterstats/io.py b/src/rasterstats/io.py
index c2da133..e74d369 100644
--- a/src/rasterstats/io.py
+++ b/src/rasterstats/io.py
@@ -28,6 +28,21 @@ geom_types = [
"MultiPolygon",
]
+try:
+ # Fiona 1.9+
+ import fiona.model
+
+ def fiona_generator(obj, layer=0):
+ with fiona.open(obj, "r", layer=layer) as src:
+ for feat in src:
+ yield fiona.model.to_dict(feat)
+
+except ModuleNotFoundError:
+ # Fiona <1.9
+ def fiona_generator(obj, layer=0):
+ with fiona.open(obj, "r", layer=layer) as src:
+ yield from src
+
def wrap_geom(geom):
"""Wraps a geometry dict in an GeoJSON Feature"""
@@ -81,11 +96,7 @@ def read_features(obj, layer=0):
with fiona.open(obj, "r", layer=layer) as src:
assert len(src) > 0
- def fiona_generator(obj):
- with fiona.open(obj, "r", layer=layer) as src:
- yield from src
-
- features_iter = fiona_generator(obj)
+ features_iter = fiona_generator(obj, layer)
except (AssertionError, TypeError, OSError, DriverError, UnicodeDecodeError):
try:
mapping = json.loads(obj)
|
perrygeo/python-rasterstats
|
57c99fa269e7b4e450f2466b820c0afded7afebf
|
diff --git a/.github/workflows/test-rasterstats.yml b/.github/workflows/test-rasterstats.yml
index 8c652d3..56bdbc2 100644
--- a/.github/workflows/test-rasterstats.yml
+++ b/.github/workflows/test-rasterstats.yml
@@ -12,14 +12,20 @@ jobs:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
+ python -m pip install pip --upgrade
python -m pip install -e .[dev]
- - name: Test with pytest
+ - name: Test all packages
run: |
pytest
+ - name: Test with older packages
+ run: |
+ python -m pip uninstall --yes geopandas
+ python -m pip install "fiona<1.9" "shapely<2.0"
+ pytest
diff --git a/tests/test_io.py b/tests/test_io.py
index aa131bd..d70c3e2 100644
--- a/tests/test_io.py
+++ b/tests/test_io.py
@@ -8,6 +8,7 @@ import rasterio
from shapely.geometry import shape
from rasterstats.io import ( # todo parse_feature
+ fiona_generator,
Raster,
boundless_array,
bounds_window,
@@ -30,8 +31,7 @@ arr3d = np.array([[[1, 1, 1], [1, 1, 1], [1, 1, 1]]])
eps = 1e-6
-with fiona.open(polygons, "r") as src:
- target_features = [f for f in src]
+target_features = [f for f in fiona_generator(polygons)]
target_geoms = [shape(f["geometry"]) for f in target_features]
@@ -85,73 +85,63 @@ def test_featurecollection():
def test_shapely():
- with fiona.open(polygons, "r") as src:
- indata = [shape(f["geometry"]) for f in src]
+ indata = [shape(f["geometry"]) for f in fiona_generator(polygons)]
_test_read_features(indata)
_test_read_features_single(indata[0])
def test_wkt():
- with fiona.open(polygons, "r") as src:
- indata = [shape(f["geometry"]).wkt for f in src]
+ indata = [shape(f["geometry"]).wkt for f in fiona_generator(polygons)]
_test_read_features(indata)
_test_read_features_single(indata[0])
def test_wkb():
- with fiona.open(polygons, "r") as src:
- indata = [shape(f["geometry"]).wkb for f in src]
+ indata = [shape(f["geometry"]).wkb for f in fiona_generator(polygons)]
_test_read_features(indata)
_test_read_features_single(indata[0])
def test_mapping_features():
# list of Features
- with fiona.open(polygons, "r") as src:
- indata = [f for f in src]
+ indata = [f for f in fiona_generator(polygons)]
_test_read_features(indata)
def test_mapping_feature():
# list of Features
- with fiona.open(polygons, "r") as src:
- indata = [f for f in src]
+ indata = [f for f in fiona_generator(polygons)]
_test_read_features(indata[0])
def test_mapping_geoms():
- with fiona.open(polygons, "r") as src:
- indata = [f for f in src]
+ indata = [f for f in fiona_generator(polygons)]
_test_read_features(indata[0]["geometry"])
def test_mapping_collection():
indata = {"type": "FeatureCollection"}
- with fiona.open(polygons, "r") as src:
- indata["features"] = [f for f in src]
+ indata["features"] = [f for f in fiona_generator(polygons)]
_test_read_features(indata)
def test_jsonstr():
# Feature str
- with fiona.open(polygons, "r") as src:
- indata = [f for f in src]
+ indata = [f for f in fiona_generator(polygons)]
indata = json.dumps(indata[0])
_test_read_features(indata)
def test_jsonstr_geom():
# geojson geom str
- with fiona.open(polygons, "r") as src:
- indata = [f for f in src]
+ indata = [f for f in fiona_generator(polygons)]
indata = json.dumps(indata[0]["geometry"])
_test_read_features(indata)
def test_jsonstr_collection():
indata = {"type": "FeatureCollection"}
- with fiona.open(polygons, "r") as src:
- indata["features"] = [f for f in src]
+ indata["features"] = [f for f in fiona_generator(polygons)]
indata = json.dumps(indata)
_test_read_features(indata)
@@ -176,22 +166,19 @@ class MockGeoInterface:
def test_geo_interface():
- with fiona.open(polygons, "r") as src:
- indata = [MockGeoInterface(f) for f in src]
+ indata = [MockGeoInterface(f) for f in fiona_generator(polygons)]
_test_read_features(indata)
def test_geo_interface_geom():
- with fiona.open(polygons, "r") as src:
- indata = [MockGeoInterface(f["geometry"]) for f in src]
+ indata = [MockGeoInterface(f["geometry"]) for f in fiona_generator(polygons)]
_test_read_features(indata)
def test_geo_interface_collection():
# geointerface for featurecollection?
indata = {"type": "FeatureCollection"}
- with fiona.open(polygons, "r") as src:
- indata["features"] = [f for f in src]
+ indata["features"] = [f for f in fiona_generator(polygons)]
indata = MockGeoInterface(indata)
_test_read_features(indata)
|
allow fiona >= 1.9
Rasterstats currently has the fiona dependency pinned to <1.9 in project.toml.
Fiona 1.9 was released Jan 30. It would be nice if rasterstats could remove this restriction, so we can upgrade fiona in our projects.
|
0.0
|
57c99fa269e7b4e450f2466b820c0afded7afebf
|
[
"tests/test_io.py::test_fiona_path",
"tests/test_io.py::test_layer_index",
"tests/test_io.py::test_layer_name",
"tests/test_io.py::test_path_unicode",
"tests/test_io.py::test_featurecollection",
"tests/test_io.py::test_shapely",
"tests/test_io.py::test_wkt",
"tests/test_io.py::test_wkb",
"tests/test_io.py::test_mapping_features",
"tests/test_io.py::test_mapping_feature",
"tests/test_io.py::test_mapping_geoms",
"tests/test_io.py::test_mapping_collection",
"tests/test_io.py::test_jsonstr",
"tests/test_io.py::test_jsonstr_geom",
"tests/test_io.py::test_jsonstr_collection",
"tests/test_io.py::test_jsonstr_collection_without_features",
"tests/test_io.py::test_invalid_jsonstr",
"tests/test_io.py::test_geo_interface",
"tests/test_io.py::test_geo_interface_geom",
"tests/test_io.py::test_geo_interface_collection",
"tests/test_io.py::test_notafeature",
"tests/test_io.py::test_boundless",
"tests/test_io.py::test_boundless_masked",
"tests/test_io.py::test_window_bounds",
"tests/test_io.py::test_bounds_window",
"tests/test_io.py::test_rowcol",
"tests/test_io.py::test_Raster_index",
"tests/test_io.py::test_Raster",
"tests/test_io.py::test_Raster_boundless_disabled",
"tests/test_io.py::test_Raster_context",
"tests/test_io.py::test_geointerface"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-23 22:43:50+00:00
|
bsd-3-clause
| 4,517 |
|
peter-wangxu__persist-queue-127
|
diff --git a/README.rst b/README.rst
index aaa6ae1..13ca369 100644
--- a/README.rst
+++ b/README.rst
@@ -31,6 +31,14 @@ and `Pickling Class Instances(Python3) <https://docs.python.org/3/library/pickle
This project is based on the achievements of `python-pqueue <https://github.com/balena/python-pqueue>`_
and `queuelib <https://github.com/scrapy/queuelib>`_
+Slack channels
+^^^^^^^^^^^^^^
+
+Join `persist-queue <https://join.slack
+.com/t/persist-queue/shared_invite
+/enQtOTM0MDgzNTQ0MDg3LTNmN2IzYjQ1MDc0MDYzMjI4OGJmNmVkNWE3ZDBjYzg5MDc0OWUzZDJkYTkwODdkZmYwODdjNjUzMTk3MWExNDE>`_ channel
+
+
Requirements
------------
* Python 2.7 or Python 3.x.
@@ -193,6 +201,12 @@ The core functions:
- ``ack``: mark item as acked
- ``nack``: there might be something wrong with current consumer, so mark item as ready and new consumer will get it
- ``ack_failed``: there might be something wrong during process, so just mark item as failed.
+- ``clear_acked_data``: perform a sql delete agaist sqlite, it remove the
+latest 1000 items whose status is ``AckStatus.acked`` (note: this does not
+shrink the file size on disk)
+- ``shrink_disk_usage`` perform a ``VACUUM`` against the sqlite, and rebuild
+ the database file, this usually takes long time and frees a lot of disk space
+ after ``clear_acked_data``
.. code-block:: python
@@ -480,12 +494,6 @@ Contribution
Simply fork this repo and send PR for your code change(also tests to cover your change), remember to give a title and description of your PR. I am willing to
enhance this project with you :).
-Slack channels
-^^^^^^^^^^^^^^
-
-Join `persist-queue <https://join.slack
-.com/t/persist-queue/shared_invite
-/enQtOTM0MDgzNTQ0MDg3LTNmN2IzYjQ1MDc0MDYzMjI4OGJmNmVkNWE3ZDBjYzg5MDc0OWUzZDJkYTkwODdkZmYwODdjNjUzMTk3MWExNDE>`_ channel
License
-------
diff --git a/persistqueue/sqlackqueue.py b/persistqueue/sqlackqueue.py
index 45ea0f4..0e059c2 100644
--- a/persistqueue/sqlackqueue.py
+++ b/persistqueue/sqlackqueue.py
@@ -38,18 +38,18 @@ class SQLiteAckQueue(sqlbase.SQLiteBase):
'{key_column} INTEGER PRIMARY KEY AUTOINCREMENT, '
'data BLOB, timestamp FLOAT, status INTEGER)')
# SQL to insert a record
- _SQL_INSERT = 'INSERT INTO {table_name} (data, timestamp, status)'\
- ' VALUES (?, ?, %s)' % AckStatus.inited
+ _SQL_INSERT = 'INSERT INTO {table_name} (data, timestamp, status)' \
+ ' VALUES (?, ?, %s)' % AckStatus.inited
# SQL to select a record
_SQL_SELECT = ('SELECT {key_column}, data, status FROM {table_name} '
'WHERE status < %s '
'ORDER BY {key_column} ASC LIMIT 1' % AckStatus.unack)
- _SQL_MARK_ACK_UPDATE = 'UPDATE {table_name} SET status = ?'\
- ' WHERE {key_column} = ?'
- _SQL_SELECT_WHERE = 'SELECT {key_column}, data FROM {table_name}'\
- ' WHERE status < %s AND' \
- ' {column} {op} ? ORDER BY {key_column} ASC'\
- ' LIMIT 1 ' % AckStatus.unack
+ _SQL_MARK_ACK_UPDATE = 'UPDATE {table_name} SET status = ?' \
+ ' WHERE {key_column} = ?'
+ _SQL_SELECT_WHERE = 'SELECT {key_column}, data FROM {table_name}' \
+ ' WHERE status < %s AND' \
+ ' {column} {op} ? ORDER BY {key_column} ASC' \
+ ' LIMIT 1 ' % AckStatus.unack
def __init__(self, path, auto_resume=True, **kwargs):
super(SQLiteAckQueue, self).__init__(path, **kwargs)
@@ -65,9 +65,9 @@ class SQLiteAckQueue(sqlbase.SQLiteBase):
unack_count = self.unack_count()
if unack_count:
log.warning("resume %d unack tasks", unack_count)
- sql = 'UPDATE {} set status = ?'\
- ' WHERE status = ?'.format(self._table_name)
- return sql, (AckStatus.ready, AckStatus.unack, )
+ sql = 'UPDATE {} set status = ?' \
+ ' WHERE status = ?'.format(self._table_name)
+ return sql, (AckStatus.ready, AckStatus.unack,)
def put(self, item):
obj = self._serializer.dumps(item)
@@ -82,17 +82,17 @@ class SQLiteAckQueue(sqlbase.SQLiteBase):
self.total = self._count()
def _count(self):
- sql = 'SELECT COUNT({}) FROM {}'\
- ' WHERE status < ?'.format(self._key_column,
- self._table_name)
+ sql = 'SELECT COUNT({}) FROM {}' \
+ ' WHERE status < ?'.format(self._key_column,
+ self._table_name)
row = self._getter.execute(sql, (AckStatus.unack,)).fetchone()
return row[0] if row else 0
def _ack_count_via_status(self, status):
- sql = 'SELECT COUNT({}) FROM {}'\
- ' WHERE status = ?'.format(self._key_column,
- self._table_name)
- row = self._getter.execute(sql, (status, )).fetchone()
+ sql = 'SELECT COUNT({}) FROM {}' \
+ ' WHERE status = ?'.format(self._key_column,
+ self._table_name)
+ row = self._getter.execute(sql, (status,)).fetchone()
return row[0] if row else 0
def unack_count(self):
@@ -109,7 +109,7 @@ class SQLiteAckQueue(sqlbase.SQLiteBase):
@sqlbase.with_conditional_transaction
def _mark_ack_status(self, key, status):
- return self._sql_mark_ack_status, (status, key, )
+ return self._sql_mark_ack_status, (status, key,)
@sqlbase.with_conditional_transaction
def clear_acked_data(self):
@@ -123,6 +123,11 @@ class SQLiteAckQueue(sqlbase.SQLiteBase):
max_acked_length=self._MAX_ACKED_LENGTH)
return sql, AckStatus.acked
+ @sqlbase.with_conditional_transaction
+ def shrink_disk_usage(self):
+ sql = """VACUUM"""
+ return sql, ()
+
@property
def _sql_mark_ack_status(self):
return self._SQL_MARK_ACK_UPDATE.format(table_name=self._table_name,
|
peter-wangxu/persist-queue
|
7bdbcdeb03ca2e202a5a98a7be86783b778b6238
|
diff --git a/persistqueue/tests/test_sqlackqueue.py b/persistqueue/tests/test_sqlackqueue.py
index 2914eec..6d395db 100644
--- a/persistqueue/tests/test_sqlackqueue.py
+++ b/persistqueue/tests/test_sqlackqueue.py
@@ -212,6 +212,7 @@ class SQLite3AckQueueTest(unittest.TestCase):
self.assertEqual(q.acked_count(), 100)
q.clear_acked_data()
self.assertEqual(q.acked_count(), 10)
+ q.shrink_disk_usage()
def test_ack_unknown_item(self):
q = SQLiteAckQueue(path=self.path)
|
ACK Queue: clear_acked_data() behavior
Playing around with the ```clear_acked_data()``` function, it seems to hang on to the last 1000 acked queue items. Why is that? I've already acked the data, yet disk space continued to be used.
Looking at the code in question:
```python
@sqlbase.with_conditional_transaction
def clear_acked_data(self):
sql = """DELETE FROM {table_name}
WHERE {key_column} IN (
SELECT _id FROM {table_name} WHERE status = ?
ORDER BY {key_column} DESC
LIMIT 1000 OFFSET {max_acked_length}
)""".format(table_name=self._table_name,
key_column=self._key_column,
max_acked_length=self._MAX_ACKED_LENGTH)
return sql, AckStatus.acked
```
It seems that self._MAX_ACKED_LENGTH is a private member constant. Can this not be made tunable by the user (e.g.. a kwarg in __init__ for the class)?
I opened my resulting sqlite data files and manually ran:
```sql
DELETE FROM ack_queue_default WHERE status = 5;
VACUUM;
```
Which reduced the file size by several GB. Unless there is some edge case, surely you'd want to do something more like this?
```python
@sqlbase.with_conditional_transaction
def clear_acked_data(self):
sql = """DELETE FROM {table_name} WHERE status = ?""".format(table_name=self._table_name)
return sql, AckStatus.acked
@sqlbase.with_conditional_transaction
def shrink_disk_usage(self):
sql = """VACUUM"""
return sql, None
```
|
0.0
|
7bdbcdeb03ca2e202a5a98a7be86783b778b6238
|
[
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_ack_and_clear",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_ack_and_clear"
] |
[
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_ack_unack_ack_failed",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_ack_unknown_item",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_multi_threaded_multi_producer",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_multi_threaded_parallel",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_multiple_consumers",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_open_close_1000",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_open_close_single",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_protocol_1",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_protocol_2",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_put_0",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_raise_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_random_read_write",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_resume_unack",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_ack_unack_ack_failed",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_ack_unknown_item",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_protocol_1",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_put_0",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_raise_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_random_read_write",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_open_close_1000",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_add_duplicate_item"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-09 03:05:30+00:00
|
bsd-3-clause
| 4,518 |
|
peter-wangxu__persist-queue-182
|
diff --git a/README.rst b/README.rst
index 67ec6bf..111752f 100644
--- a/README.rst
+++ b/README.rst
@@ -180,6 +180,11 @@ Close the console, and then recreate the queue:
'str2'
>>>
+New functions:
+*Available since v0.8.0*
+
+- ``shrink_disk_usage`` perform a ``VACUUM`` against the sqlite, and rebuild the database file, this usually takes long time and frees a lot of disk space after ``get()``
+
Example usage of SQLite3 based ``UniqueQ``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/persistqueue/__init__.py b/persistqueue/__init__.py
index 5810b3b..21b4ced 100644
--- a/persistqueue/__init__.py
+++ b/persistqueue/__init__.py
@@ -1,7 +1,7 @@
# coding=utf-8
__author__ = 'Peter Wang'
__license__ = 'BSD'
-__version__ = '0.8.0-alpha0'
+__version__ = '0.8.0-beta0'
from .exceptions import Empty, Full # noqa
from .queue import Queue # noqa
diff --git a/persistqueue/sqlackqueue.py b/persistqueue/sqlackqueue.py
index 80b0197..877ebad 100644
--- a/persistqueue/sqlackqueue.py
+++ b/persistqueue/sqlackqueue.py
@@ -167,11 +167,6 @@ class SQLiteAckQueue(sqlbase.SQLiteBase):
)
return sql, AckStatus.acked
- @sqlbase.with_conditional_transaction
- def shrink_disk_usage(self):
- sql = """VACUUM"""
- return sql, ()
-
@property
def _sql_mark_ack_status(self):
return self._SQL_MARK_ACK_UPDATE.format(
diff --git a/persistqueue/sqlbase.py b/persistqueue/sqlbase.py
index 607b3cb..84dba02 100644
--- a/persistqueue/sqlbase.py
+++ b/persistqueue/sqlbase.py
@@ -212,6 +212,11 @@ class SQLBase(object):
datarows.append(item)
return datarows
+ @with_conditional_transaction
+ def shrink_disk_usage(self):
+ sql = """VACUUM"""
+ return sql, ()
+
@property
def size(self):
return self.total
|
peter-wangxu/persist-queue
|
464a82063f9421409491b9e099b0ea4c46fdb411
|
diff --git a/persistqueue/tests/test_sqlackqueue.py b/persistqueue/tests/test_sqlackqueue.py
index e455da6..ee81f30 100644
--- a/persistqueue/tests/test_sqlackqueue.py
+++ b/persistqueue/tests/test_sqlackqueue.py
@@ -75,6 +75,7 @@ class SQLite3AckQueueTest(unittest.TestCase):
# assert adding another one still works
q.put('foobar')
data = q.get()
+ q.shrink_disk_usage()
self.assertEqual('foobar', data)
def test_random_read_write(self):
diff --git a/persistqueue/tests/test_sqlqueue.py b/persistqueue/tests/test_sqlqueue.py
index 87cfb90..4f5452a 100644
--- a/persistqueue/tests/test_sqlqueue.py
+++ b/persistqueue/tests/test_sqlqueue.py
@@ -75,6 +75,7 @@ class SQLite3QueueTest(unittest.TestCase):
# assert adding another one still works
q.put('foobar')
data = q.get()
+ q.shrink_disk_usage()
self.assertEqual('foobar', data)
def test_random_read_write(self):
|
File is not getting cleared after get() function
I entered data to file in file-based queue. But after performing get() the elements from the queue is deleted but if we open the file we can still see the data. So i think the file size will keep on increasing if we keep on increasing data.
|
0.0
|
464a82063f9421409491b9e099b0ea4c46fdb411
|
[
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_1000",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_open_close_1000"
] |
[
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_ack_active_size",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_ack_and_clear",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_ack_unack_ack_failed",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_ack_unknown_item",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_get_id",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_get_next_in_order",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_get_raw",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_multi_threaded_multi_producer",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_multi_threaded_parallel",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_multiple_consumers",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_nack_raw",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_open_close_1000",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_open_close_single",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_protocol_1",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_protocol_2",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_put_0",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_queue",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_raise_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_random_read_write",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_resume_unack",
"persistqueue/tests/test_sqlackqueue.py::SQLite3AckQueueTest::test_update",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_ack_active_size",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_ack_and_clear",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_ack_unack_ack_failed",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_ack_unknown_item",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_get_id",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_get_next_in_order",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_get_raw",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_nack_raw",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_protocol_1",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_put_0",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_queue",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_raise_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_random_read_write",
"persistqueue/tests/test_sqlackqueue.py::SQLite3QueueInMemory::test_update",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_ack_active_size",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_ack_and_clear",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_ack_unack_ack_failed",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_ack_unknown_item",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_empty",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_get_id",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_get_next_in_order",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_get_raw",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_multi_threaded_multi_producer",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_multi_threaded_parallel",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_multiple_consumers",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_nack_raw",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_open_close_1000",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_open_close_single",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_protocol_1",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_protocol_2",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_put_0",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_queue",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_raise_empty",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_random_read_write",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_resume_unack",
"persistqueue/tests/test_sqlackqueue.py::FILOSQLite3AckQueueTest::test_update",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_ack_active_size",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_ack_and_clear",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_ack_unack_ack_failed",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_ack_unknown_item",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_add_duplicate_item",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_get_id",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_get_next_in_order",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_get_raw",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_multi_threaded_multi_producer",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_multi_threaded_parallel",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_multiple_consumers",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_nack_raw",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_open_close_1000",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_open_close_single",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_protocol_1",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_protocol_2",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_put_0",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_queue",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_raise_empty",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_random_read_write",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_resume_unack",
"persistqueue/tests/test_sqlackqueue.py::SQLite3UniqueAckQueueTest::test_update",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_empty",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_get_id",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_get_raw",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_json_serializer",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_multi_producer",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_parallel",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_multiple_consumers",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_single",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_protocol_1",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_protocol_2",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_put_0",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_queue",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_raise_empty",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_random_read_write",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_task_done_with_restart",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueTest::test_update",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_empty",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_get_id",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_get_raw",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_json_serializer",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_multi_threaded_multi_producer",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_multi_threaded_parallel",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_open_close_single",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_protocol_1",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_protocol_2",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_put_0",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_queue",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_raise_empty",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_random_read_write",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_task_done_with_restart",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_update",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_empty",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_get_id",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_get_raw",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_json_serializer",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_protocol_1",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_put_0",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_queue",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_raise_empty",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_random_read_write",
"persistqueue/tests/test_sqlqueue.py::SQLite3QueueInMemory::test_update",
"persistqueue/tests/test_sqlqueue.py::FILOSQLite3QueueTest::test_open_close_1000",
"persistqueue/tests/test_sqlqueue.py::FILOSQLite3QueueNoAutoCommitTest::test_open_close_1000",
"persistqueue/tests/test_sqlqueue.py::SQLite3UniqueQueueTest::test_add_duplicate_item",
"persistqueue/tests/test_sqlqueue.py::SQLite3UniqueQueueTest::test_multiple_consumers",
"persistqueue/tests/test_sqlqueue.py::SQLite3UniqueQueueTest::test_unique_dictionary_serialization_json",
"persistqueue/tests/test_sqlqueue.py::SQLite3UniqueQueueTest::test_unique_dictionary_serialization_pickle"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-04-22 12:22:36+00:00
|
bsd-3-clause
| 4,519 |
|
peter-wangxu__persist-queue-25
|
diff --git a/persistqueue/__init__.py b/persistqueue/__init__.py
index c066e10..ef321ef 100644
--- a/persistqueue/__init__.py
+++ b/persistqueue/__init__.py
@@ -1,7 +1,7 @@
# coding=utf-8
__author__ = 'Peter Wang'
__license__ = 'BSD License'
-__version__ = '0.3.0'
+__version__ = '0.3.1'
from .exceptions import Empty, Full # noqa
from .pdict import PDict # noqa
diff --git a/persistqueue/sqlbase.py b/persistqueue/sqlbase.py
index 22bdfe6..48955f7 100644
--- a/persistqueue/sqlbase.py
+++ b/persistqueue/sqlbase.py
@@ -27,15 +27,17 @@ def commit_ignore_error(conn):
"""Ignore the error of no transaction is active.
The transaction may be already committed by user's task_done call.
- It's safe to to ignore all errors of this kind.
+ It's safe to ignore all errors of this kind.
"""
try:
conn.commit()
except sqlite3.OperationalError as ex:
if 'no transaction is active' in str(ex):
- log.warning(
+ log.debug(
'Not able to commit the transaction, '
'may already be committed.')
+ else:
+ raise
class SQLiteBase(object):
@@ -50,17 +52,23 @@ class SQLiteBase(object):
_MEMORY = ':memory:' # flag indicating store DB in memory
def __init__(self, path, name='default', multithreading=False,
- timeout=10.0, auto_commit=False):
+ timeout=10.0, auto_commit=True):
"""Initiate a queue in sqlite3 or memory.
:param path: path for storing DB file.
+ :param name: the suffix for the table name,
+ table name would be ${_TABLE_NAME}_${name}
:param multithreading: if set to True, two db connections will be,
one for **put** and one for **get**.
:param timeout: timeout in second waiting for the database lock.
:param auto_commit: Set to True, if commit is required on every
- INSERT/UPDATE action.
+ INSERT/UPDATE action, otherwise False, whereas
+ a **task_done** is required to persist changes
+ after **put**.
+
"""
+ self.memory_sql = False
self.path = path
self.name = name
self.timeout = timeout
@@ -71,19 +79,27 @@ class SQLiteBase(object):
def _init(self):
"""Initialize the tables in DB."""
- if not os.path.exists(self.path):
+ if self.path == self._MEMORY:
+ self.memory_sql = True
+ log.debug("Initializing Sqlite3 Queue in memory.")
+ elif not os.path.exists(self.path):
os.makedirs(self.path)
- log.debug('Initializing Sqlite3 Queue with path {}'.format(self.path))
+ log.debug(
+ 'Initializing Sqlite3 Queue with path {}'.format(self.path))
self._conn = self._new_db_connection(
self.path, self.multithreading, self.timeout)
self._getter = self._conn
self._putter = self._conn
- if self.multithreading:
- self._putter = self._new_db_connection(
- self.path, self.multithreading, self.timeout)
+
self._conn.execute(self._sql_create)
self._conn.commit()
+ # Setup another session only for disk-based queue.
+ if self.multithreading:
+ if not self.memory_sql:
+ self._putter = self._new_db_connection(
+ self.path, self.multithreading, self.timeout)
+
# SQLite3 transaction lock
self.tran_lock = threading.Lock()
self.put_event = threading.Event()
|
peter-wangxu/persist-queue
|
10b8fa0e8bf5da6d44dbeb85f94a2b0779685d41
|
diff --git a/tests/test_sqlqueue.py b/tests/test_sqlqueue.py
index 61a31db..1e63431 100644
--- a/tests/test_sqlqueue.py
+++ b/tests/test_sqlqueue.py
@@ -150,23 +150,32 @@ class SQLite3QueueTest(unittest.TestCase):
queue.put('var%d' % x)
task_done_if_required(queue)
- def consumer():
- for _ in range(100):
+ counter = []
+ # Set all to 0
+ for _ in range(1000):
+ counter.append(0)
+
+ def consumer(index):
+ for i in range(200):
data = queue.get(block=True)
self.assertTrue('var' in data)
+ counter[index * 200 + i] = data
p = Thread(target=producer)
p.start()
consumers = []
- for _ in range(10):
- t = Thread(target=consumer)
+ for index in range(5):
+ t = Thread(target=consumer, args=(index,))
t.start()
consumers.append(t)
+ p.join()
for t in consumers:
t.join()
self.assertEqual(0, queue.qsize())
+ for x in range(1000):
+ self.assertNotEqual(0, counter[x], "0 for counter's index %s" % x)
class SQLite3QueueAutoCommitTest(SQLite3QueueTest):
@@ -175,6 +184,24 @@ class SQLite3QueueAutoCommitTest(SQLite3QueueTest):
self.auto_commit = True
+class SQLite3QueueInMemory(SQLite3QueueTest):
+ def setUp(self):
+ self.path = ":memory:"
+ self.auto_commit = False
+
+ def test_open_close_1000(self):
+ self.skipTest('Memory based sqlite is not persistent.')
+
+ def test_open_close_single(self):
+ self.skipTest('Memory based sqlite is not persistent.')
+
+ def test_multiple_consumers(self):
+ # TODO(peter) when the shared-cache feature is available in default
+ # Python of most Linux distros, this should be easy:).
+ self.skipTest('In-memory based sqlite needs the support '
+ 'of shared-cache')
+
+
class FILOSQLite3QueueTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='filo_sqlqueue')
|
FIFOSQLiteQueue: the get() method returns None instead of blocking
and if I specify get(block=True) it raises the empty exception
|
0.0
|
10b8fa0e8bf5da6d44dbeb85f94a2b0779685d41
|
[
"tests/test_sqlqueue.py::SQLite3QueueInMemory::test_multi_threaded_parallel"
] |
[
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_multi_producer",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_parallel",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multiple_consumers",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_1000",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_single",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_random_read_write",
"tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_multi_threaded_multi_producer",
"tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_multi_threaded_parallel",
"tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_multiple_consumers",
"tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_open_close_1000",
"tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_open_close_single",
"tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueAutoCommitTest::test_random_read_write",
"tests/test_sqlqueue.py::SQLite3QueueInMemory::test_multi_threaded_multi_producer",
"tests/test_sqlqueue.py::SQLite3QueueInMemory::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueInMemory::test_random_read_write",
"tests/test_sqlqueue.py::FILOSQLite3QueueTest::test_open_close_1000",
"tests/test_sqlqueue.py::FILOSQLite3QueueAutoCommitTest::test_open_close_1000"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-10-10 02:39:16+00:00
|
bsd-3-clause
| 4,520 |
|
peter-wangxu__persist-queue-28
|
diff --git a/persistqueue/sqlbase.py b/persistqueue/sqlbase.py
index 48955f7..e0a7672 100644
--- a/persistqueue/sqlbase.py
+++ b/persistqueue/sqlbase.py
@@ -79,6 +79,7 @@ class SQLiteBase(object):
def _init(self):
"""Initialize the tables in DB."""
+
if self.path == self._MEMORY:
self.memory_sql = True
log.debug("Initializing Sqlite3 Queue in memory.")
@@ -99,19 +100,26 @@ class SQLiteBase(object):
if not self.memory_sql:
self._putter = self._new_db_connection(
self.path, self.multithreading, self.timeout)
-
+ if self.auto_commit is False:
+ log.warning('auto_commit=False is still experimental,'
+ 'only use it with care.')
+ self._getter.isolation_level = "DEFERRED"
+ self._putter.isolation_level = "DEFERRED"
# SQLite3 transaction lock
self.tran_lock = threading.Lock()
self.put_event = threading.Event()
def _new_db_connection(self, path, multithreading, timeout):
+ conn = None
if path == self._MEMORY:
- return sqlite3.connect(path,
+ conn = sqlite3.connect(path,
check_same_thread=not multithreading)
else:
- return sqlite3.connect('{}/data.db'.format(path),
+ conn = sqlite3.connect('{}/data.db'.format(path),
timeout=timeout,
check_same_thread=not multithreading)
+ conn.execute('PRAGMA journal_mode=WAL;')
+ return conn
@with_conditional_transaction
def _insert_into(self, *record):
@@ -134,7 +142,7 @@ class SQLiteBase(object):
def _count(self):
sql = 'SELECT COUNT({}) FROM {}'.format(self._key_column,
self._table_name)
- row = self._putter.execute(sql).fetchone()
+ row = self._getter.execute(sql).fetchone()
return row[0] if row else 0
def _task_done(self):
diff --git a/persistqueue/sqlqueue.py b/persistqueue/sqlqueue.py
index 6c86f2f..2a53cfe 100644
--- a/persistqueue/sqlqueue.py
+++ b/persistqueue/sqlqueue.py
@@ -15,6 +15,9 @@ sqlite3.enable_callback_tracebacks(True)
log = logging.getLogger(__name__)
+# 10 seconds internal for `wait` of event
+TICK_FOR_WAIT = 10
+
class SQLiteQueue(sqlbase.SQLiteBase):
"""SQLite3 based FIFO queue."""
@@ -44,7 +47,7 @@ class SQLiteQueue(sqlbase.SQLiteBase):
def _pop(self):
with self.action_lock:
row = self._select()
- # Perhaps a sqilite bug, sometimes (None, None) is returned
+ # Perhaps a sqlite3 bug, sometimes (None, None) is returned
# by select, below can avoid these invalid records.
if row and row[0] is not None:
self._delete(row[0])
@@ -54,23 +57,31 @@ class SQLiteQueue(sqlbase.SQLiteBase):
return row[1] # pickled data
return None
- def get(self, block=False):
- unpickled = self._pop()
- item = None
- if unpickled:
- item = pickle.loads(unpickled)
+ def get(self, block=True, timeout=None):
+ if not block:
+ pickled = self._pop()
+ if not pickled:
+ raise Empty
+ elif timeout is None:
+ # block until a put event.
+ pickled = self._pop()
+ while not pickled:
+ self.put_event.wait(TICK_FOR_WAIT)
+ pickled = self._pop()
+ elif timeout < 0:
+ raise ValueError("'timeout' must be a non-negative number")
else:
- if block:
- end = _time.time() + 10.0
- while not unpickled:
- remaining = end - _time.time()
- if remaining <= 0.0:
- raise Empty
- # wait for no more than 10 seconds
- self.put_event.wait(remaining)
- unpickled = self._pop()
- item = pickle.loads(unpickled)
-
+ # block until the timeout reached
+ endtime = _time.time() + timeout
+ pickled = self._pop()
+ while not pickled:
+ remaining = endtime - _time.time()
+ if remaining <= 0.0:
+ raise Empty
+ self.put_event.wait(
+ TICK_FOR_WAIT if TICK_FOR_WAIT < remaining else remaining)
+ pickled = self._pop()
+ item = pickle.loads(pickled)
return item
def task_done(self):
|
peter-wangxu/persist-queue
|
8cd900781aa449d2e921bf5db953d02815110646
|
diff --git a/tests/test_sqlqueue.py b/tests/test_sqlqueue.py
index 1e63431..fe00f42 100644
--- a/tests/test_sqlqueue.py
+++ b/tests/test_sqlqueue.py
@@ -18,7 +18,7 @@ def task_done_if_required(queue):
class SQLite3QueueTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='sqlqueue')
- self.auto_commit = False
+ self.auto_commit = True
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
@@ -30,7 +30,12 @@ class SQLite3QueueTest(unittest.TestCase):
task_done_if_required(q)
d = q.get()
self.assertEqual('first', d)
- self.assertRaises(Empty, q.get, block=True)
+ self.assertRaises(Empty, q.get, block=False)
+
+ # assert with timeout
+ self.assertRaises(Empty, q.get, block=True, timeout=1.0)
+ # assert with negative timeout
+ self.assertRaises(ValueError, q.get, block=True, timeout=-1.0)
def test_open_close_single(self):
"""Write 1 item, close, reopen checking if same item is there"""
@@ -75,7 +80,7 @@ class SQLite3QueueTest(unittest.TestCase):
q.get()
n -= 1
else:
- self.assertEqual(None, q.get())
+ self.assertRaises(Empty, q.get, block=False)
else:
q.put('var%d' % random.getrandbits(16))
task_done_if_required(q)
@@ -108,7 +113,7 @@ class SQLite3QueueTest(unittest.TestCase):
c.join()
self.assertEqual(0, m_queue.size)
self.assertEqual(0, len(m_queue))
- self.assertIsNone(m_queue.get(block=False))
+ self.assertRaises(Empty, m_queue.get, block=False)
def test_multi_threaded_multi_producer(self):
"""Test sqlqueue can be used by multiple producers."""
@@ -175,19 +180,35 @@ class SQLite3QueueTest(unittest.TestCase):
self.assertEqual(0, queue.qsize())
for x in range(1000):
- self.assertNotEqual(0, counter[x], "0 for counter's index %s" % x)
+ self.assertNotEqual(0, counter[x],
+ "not 0 for counter's index %s" % x)
-class SQLite3QueueAutoCommitTest(SQLite3QueueTest):
+class SQLite3QueueNoAutoCommitTest(SQLite3QueueTest):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='sqlqueue_auto_commit')
- self.auto_commit = True
+ self.auto_commit = False
+
+ def test_multiple_consumers(self):
+ """
+ FAIL: test_multiple_consumers (
+ -tests.test_sqlqueue.SQLite3QueueNoAutoCommitTest)
+ Test sqlqueue can be used by multiple consumers.
+ ----------------------------------------------------------------------
+ Traceback (most recent call last):
+ File "persist-queue\tests\test_sqlqueue.py", line 183,
+ -in test_multiple_consumers
+ self.assertEqual(0, queue.qsize())
+ AssertionError: 0 != 72
+ :return:
+ """
+ self.skipTest('Skipped due to a known bug above.')
class SQLite3QueueInMemory(SQLite3QueueTest):
def setUp(self):
self.path = ":memory:"
- self.auto_commit = False
+ self.auto_commit = True
def test_open_close_1000(self):
self.skipTest('Memory based sqlite is not persistent.')
@@ -196,16 +217,22 @@ class SQLite3QueueInMemory(SQLite3QueueTest):
self.skipTest('Memory based sqlite is not persistent.')
def test_multiple_consumers(self):
- # TODO(peter) when the shared-cache feature is available in default
- # Python of most Linux distros, this should be easy:).
- self.skipTest('In-memory based sqlite needs the support '
- 'of shared-cache')
+ self.skipTest('Skipped due to occasional crash during '
+ 'multithreading mode.')
+
+ def test_multi_threaded_multi_producer(self):
+ self.skipTest('Skipped due to occasional crash during '
+ 'multithreading mode.')
+
+ def test_multi_threaded_parallel(self):
+ self.skipTest('Skipped due to occasional crash during '
+ 'multithreading mode.')
class FILOSQLite3QueueTest(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='filo_sqlqueue')
- self.auto_commit = False
+ self.auto_commit = True
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
@@ -230,7 +257,7 @@ class FILOSQLite3QueueTest(unittest.TestCase):
self.assertEqual('foobar', data)
-class FILOSQLite3QueueAutoCommitTest(FILOSQLite3QueueTest):
+class FILOSQLite3QueueNoAutoCommitTest(FILOSQLite3QueueTest):
def setUp(self):
self.path = tempfile.mkdtemp(suffix='filo_sqlqueue_auto_commit')
- self.auto_commit = True
+ self.auto_commit = False
|
FIFOSQLiteQueue: the get() method returns None instead of blocking
and if I specify get(block=True) it raises the empty exception
|
0.0
|
8cd900781aa449d2e921bf5db953d02815110646
|
[
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_parallel",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_random_read_write",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_multi_threaded_parallel",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_random_read_write",
"tests/test_sqlqueue.py::SQLite3QueueInMemory::test_raise_empty",
"tests/test_sqlqueue.py::SQLite3QueueInMemory::test_random_read_write"
] |
[
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multi_threaded_multi_producer",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_multiple_consumers",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_1000",
"tests/test_sqlqueue.py::SQLite3QueueTest::test_open_close_single",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_multi_threaded_multi_producer",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_open_close_1000",
"tests/test_sqlqueue.py::SQLite3QueueNoAutoCommitTest::test_open_close_single",
"tests/test_sqlqueue.py::FILOSQLite3QueueTest::test_open_close_1000",
"tests/test_sqlqueue.py::FILOSQLite3QueueNoAutoCommitTest::test_open_close_1000"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-10-20 14:55:36+00:00
|
bsd-3-clause
| 4,521 |
|
peterbe__premailer-218
|
diff --git a/premailer/premailer.py b/premailer/premailer.py
index 344a6ad..37fd743 100644
--- a/premailer/premailer.py
+++ b/premailer/premailer.py
@@ -311,18 +311,18 @@ class Premailer(object):
return rules, leftover
def transform(self, html=None, pretty_print=True, **kwargs):
- """change the self.html and return it with CSS turned into style
+ """change the html and return it with CSS turned into style
attributes.
"""
- if html is not None:
- if self.html is not None:
- raise TypeError("Can't pass html argument twice")
- self.html = html
- elif self.html is None:
+ if html is not None and self.html is not None:
+ raise TypeError("Can't pass html argument twice")
+ elif html is None and self.html is None:
raise TypeError("must pass html as first argument")
- if hasattr(self.html, "getroottree"):
+ elif html is None:
+ html = self.html
+ if hasattr(html, "getroottree"):
# skip the next bit
- root = self.html.getroottree()
+ root = html.getroottree()
page = root
tree = root
else:
@@ -330,7 +330,7 @@ class Premailer(object):
parser = etree.XMLParser(ns_clean=False, resolve_entities=False)
else:
parser = etree.HTMLParser()
- stripped = self.html.strip()
+ stripped = html.strip()
tree = etree.fromstring(stripped, parser).getroottree()
page = tree.getroot()
# lxml inserts a doctype if none exists, so only include it in
@@ -379,6 +379,7 @@ class Premailer(object):
css_body = self._load_external(href)
these_rules, these_leftover = self._parse_style_rules(css_body, index)
+
index += 1
rules.extend(these_rules)
parent_of_element = element.getparent()
@@ -522,7 +523,7 @@ class Premailer(object):
continue
parent.attrib[attr] = urljoin(self.base_url, url)
- if hasattr(self.html, "getroottree"):
+ if hasattr(html, "getroottree"):
return root
else:
kwargs.setdefault("method", self.method)
|
peterbe/premailer
|
350af2440ccc9598d01841d1a22a8e5236da85a1
|
diff --git a/premailer/tests/test_premailer.py b/premailer/tests/test_premailer.py
index f0ef4ac..0ff7a37 100644
--- a/premailer/tests/test_premailer.py
+++ b/premailer/tests/test_premailer.py
@@ -215,6 +215,68 @@ class Tests(unittest.TestCase):
p = Premailer()
assert_raises(TypeError, p.transform)
+ def test_instance_reuse(self):
+ """test whether the premailer instance can be reused"""
+
+ html_1 = """<html>
+ <head>
+ <title>Title</title>
+ <style type="text/css">
+ h1, h2 { color:red; }
+ strong {
+ text-decoration:none
+ }
+ </style>
+ </head>
+ <body>
+ <h1>Hi!</h1>
+ <p><strong>Yes!</strong></p>
+ </body>
+ </html>"""
+
+ html_2 = """<html>
+ <head>
+ <title>Another Title</title>
+ <style type="text/css">
+ h1, h2 { color:blue; }
+ strong {
+ text-decoration:underline
+ }
+ </style>
+ </head>
+ <body>
+ <h1>Hello!</h1>
+ <p><strong>Nope!</strong></p>
+ </body>
+ </html>"""
+
+ expect_html_1 = """<html>
+ <head>
+ <title>Title</title>
+ </head>
+ <body>
+ <h1 style="color:red">Hi!</h1>
+ <p><strong style="text-decoration:none">Yes!</strong></p>
+ </body>
+ </html>"""
+
+ expect_html_2 = """<html>
+ <head>
+ <title>Another Title</title>
+ </head>
+ <body>
+ <h1 style="color:blue">Hello!</h1>
+ <p><strong style="text-decoration:underline">Nope!</strong></p>
+ </body>
+ </html>"""
+
+ p = Premailer()
+ result_html_1 = p.transform(html_1)
+ result_html_2 = p.transform(html_2)
+
+ compare_html(expect_html_1, result_html_1)
+ compare_html(expect_html_2, result_html_2)
+
def test_remove_classes(self):
"""test the simplest case"""
|
Reusing premailer instances does not work
The README contains a [nice section](https://github.com/peterbe/premailer#if-execution-speed-is-on-your-mind) on speeding up premailer by reusing premailer instances. This, however, [throws an exception](https://github.com/peterbe/premailer/blob/master/premailer/premailer.py#L319) on subsequent iterations because the instance's internal `html` attribute was already set on the first iteration.
Possible solutions:
**a)** Refactor code to allow for multiple runs
**b)** Remove problematic section from documentation
|
0.0
|
350af2440ccc9598d01841d1a22a8e5236da85a1
|
[
"premailer/tests/test_premailer.py::Tests::test_instance_reuse"
] |
[
"premailer/tests/test_premailer.py::Tests::test_3_digit_color_expand",
"premailer/tests/test_premailer.py::Tests::test_align_float_images",
"premailer/tests/test_premailer.py::Tests::test_apple_newsletter_example",
"premailer/tests/test_premailer.py::Tests::test_base_url_fixer",
"premailer/tests/test_premailer.py::Tests::test_base_url_ignore_links",
"premailer/tests/test_premailer.py::Tests::test_base_url_with_path",
"premailer/tests/test_premailer.py::Tests::test_basic_html",
"premailer/tests/test_premailer.py::Tests::test_basic_html_argument_wrong",
"premailer/tests/test_premailer.py::Tests::test_basic_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_basic_html_with_pseudo_selector",
"premailer/tests/test_premailer.py::Tests::test_basic_xml",
"premailer/tests/test_premailer.py::Tests::test_broken_xml",
"premailer/tests/test_premailer.py::Tests::test_capture_cssutils_logging",
"premailer/tests/test_premailer.py::Tests::test_child_selector",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_argument",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_stdin",
"premailer/tests/test_premailer.py::Tests::test_command_line_preserve_style_tags",
"premailer/tests/test_premailer.py::Tests::test_comments_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_css_disable_basic_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_disable_leftover_css",
"premailer/tests/test_premailer.py::Tests::test_css_text",
"premailer/tests/test_premailer.py::Tests::test_css_text_with_only_body_present",
"premailer/tests/test_premailer.py::Tests::test_css_with_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_excluded",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_included",
"premailer/tests/test_premailer.py::Tests::test_disabled_validator",
"premailer/tests/test_premailer.py::Tests::test_doctype",
"premailer/tests/test_premailer.py::Tests::test_empty_style_tag",
"premailer/tests/test_premailer.py::Tests::test_external_links",
"premailer/tests/test_premailer.py::Tests::test_external_links_disallow_network",
"premailer/tests/test_premailer.py::Tests::test_external_links_unfindable",
"premailer/tests/test_premailer.py::Tests::test_external_styles_and_links",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_http",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_https",
"premailer/tests/test_premailer.py::Tests::test_external_styles_with_base_url",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_class_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_element_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_id_over_others",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_important_over_others",
"premailer/tests/test_premailer.py::Tests::test_fontface_selectors_with_no_selectortext",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_external_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_incorrectly",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_inline_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_style_elements_with_media_attribute",
"premailer/tests/test_premailer.py::Tests::test_include_star_selector",
"premailer/tests/test_premailer.py::Tests::test_inline_important",
"premailer/tests/test_premailer.py::Tests::test_inline_wins_over_external",
"premailer/tests/test_premailer.py::Tests::test_keyframe_selectors",
"premailer/tests/test_premailer.py::Tests::test_kwargs_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_last_child",
"premailer/tests/test_premailer.py::Tests::test_last_child_exclude_pseudo",
"premailer/tests/test_premailer.py::Tests::test_leftover_important",
"premailer/tests/test_premailer.py::Tests::test_links_without_protocol",
"premailer/tests/test_premailer.py::Tests::test_load_external_url",
"premailer/tests/test_premailer.py::Tests::test_load_external_url_404",
"premailer/tests/test_premailer.py::Tests::test_mailto_url",
"premailer/tests/test_premailer.py::Tests::test_mediaquery",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_basic",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_non_trivial",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_class",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_unset",
"premailer/tests/test_premailer.py::Tests::test_mixed_pseudo_selectors",
"premailer/tests/test_premailer.py::Tests::test_multiple_style_elements",
"premailer/tests/test_premailer.py::Tests::test_multithreading",
"premailer/tests/test_premailer.py::Tests::test_parse_style_rules",
"premailer/tests/test_premailer.py::Tests::test_precedence_comparison",
"premailer/tests/test_premailer.py::Tests::test_prefer_inline_to_class",
"premailer/tests/test_premailer.py::Tests::test_pseudo_selectors_without_selector",
"premailer/tests/test_premailer.py::Tests::test_remove_classes",
"premailer/tests/test_premailer.py::Tests::test_remove_unset_properties",
"premailer/tests/test_premailer.py::Tests::test_six_color",
"premailer/tests/test_premailer.py::Tests::test_strip_important",
"premailer/tests/test_premailer.py::Tests::test_style_attribute_specificity",
"premailer/tests/test_premailer.py::Tests::test_style_block_with_external_urls",
"premailer/tests/test_premailer.py::Tests::test_tel_url",
"premailer/tests/test_premailer.py::Tests::test_turnoff_cache_works_as_expected",
"premailer/tests/test_premailer.py::Tests::test_type_test",
"premailer/tests/test_premailer.py::Tests::test_unknown_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_uppercase_margin",
"premailer/tests/test_premailer.py::Tests::test_xml_cdata"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-22 16:57:59+00:00
|
bsd-3-clause
| 4,522 |
|
peterbe__premailer-258
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 85c1be6..debbd61 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,6 +4,10 @@ premailer Changes
Peter's note: Unfortunately, ``premailer`` didn't use to keep a change log. But it's
never too late to start, so let's start here and now.
+Unreleased
+----------
+* New option ``session=None`` to provide the session used for making http requests.
+
3.9.0
-----
diff --git a/README.rst b/README.rst
index 0dad963..1b6b8cc 100644
--- a/README.rst
+++ b/README.rst
@@ -137,6 +137,7 @@ The ``transform`` shortcut function transforms the given HTML using the defaults
allow_network=True # allow network access to fetch linked css files
allow_insecure_ssl=False # Don't allow unverified SSL certificates for external links
allow_loading_external_files=False # Allow loading any non-HTTP external file URL
+ session=None # Session used for http requests - supply your own for caching or to provide authentication
For more advanced options, check out the code of the ``Premailer`` class
and all its options in its constructor.
diff --git a/premailer/premailer.py b/premailer/premailer.py
index b9d1390..66907ca 100644
--- a/premailer/premailer.py
+++ b/premailer/premailer.py
@@ -140,6 +140,7 @@ class Premailer(object):
allow_network=True,
allow_insecure_ssl=False,
allow_loading_external_files=False,
+ session=None,
):
self.html = html
self.base_url = base_url
@@ -185,6 +186,7 @@ class Premailer(object):
self.allow_network = allow_network
self.allow_insecure_ssl = allow_insecure_ssl
self.allow_loading_external_files = allow_loading_external_files
+ self.session = session or requests
if cssutils_logging_handler:
cssutils.log.addHandler(cssutils_logging_handler)
@@ -568,7 +570,7 @@ class Premailer(object):
return out
def _load_external_url(self, url):
- response = requests.get(url, verify=not self.allow_insecure_ssl)
+ response = self.session.get(url, verify=not self.allow_insecure_ssl)
response.raise_for_status()
return response.text
|
peterbe/premailer
|
7dcee9e68756442effb087c97823b444dde74a0a
|
diff --git a/premailer/tests/test_premailer.py b/premailer/tests/test_premailer.py
index 9c0c409..b4e4180 100644
--- a/premailer/tests/test_premailer.py
+++ b/premailer/tests/test_premailer.py
@@ -2084,6 +2084,17 @@ ent:"" !important;display:block !important}
mocked_requests.get.assert_called_once_with(faux_uri, verify=True)
eq_(faux_response, r)
+ def test_load_external_url_with_custom_session(self):
+ mocked_session = mock.MagicMock()
+ faux_response = "This is not a response"
+ faux_uri = "https://example.com/site.css"
+ mocked_session.get.return_value = MockResponse(faux_response)
+ p = premailer.premailer.Premailer("<p>A paragraph</p>", session=mocked_session)
+ r = p._load_external_url(faux_uri)
+
+ mocked_session.get.assert_called_once_with(faux_uri, verify=True)
+ eq_(faux_response, r)
+
@mock.patch("premailer.premailer.requests")
def test_load_external_url_no_insecure_ssl(self, mocked_requests):
"Test premailer.premailer.Premailer._load_external_url"
|
Add cache support for _load_external_url
It would be handy if one could use `allow_network` to fetch styles and also rely on a cache for speedy results
It might be simple enough to just expose the session creation used for requests. Then something like [CacheControl](https://2.python-requests.org/en/master/community/recommended/#cachecontrol) could be dropped in as a simple fix or something more advanced like a custom cache wrapper if needed.
|
0.0
|
7dcee9e68756442effb087c97823b444dde74a0a
|
[
"premailer/tests/test_premailer.py::Tests::test_load_external_url_with_custom_session"
] |
[
"premailer/tests/test_premailer.py::Tests::test_3_digit_color_expand",
"premailer/tests/test_premailer.py::Tests::test_align_float_images",
"premailer/tests/test_premailer.py::Tests::test_allow_loading_external_files",
"premailer/tests/test_premailer.py::Tests::test_apple_newsletter_example",
"premailer/tests/test_premailer.py::Tests::test_base_url_fixer",
"premailer/tests/test_premailer.py::Tests::test_base_url_ignore_links",
"premailer/tests/test_premailer.py::Tests::test_base_url_with_path",
"premailer/tests/test_premailer.py::Tests::test_basic_html",
"premailer/tests/test_premailer.py::Tests::test_basic_html_argument_wrong",
"premailer/tests/test_premailer.py::Tests::test_basic_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_basic_html_with_pseudo_selector",
"premailer/tests/test_premailer.py::Tests::test_basic_xml",
"premailer/tests/test_premailer.py::Tests::test_broken_xml",
"premailer/tests/test_premailer.py::Tests::test_capture_cssutils_logging",
"premailer/tests/test_premailer.py::Tests::test_child_selector",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_argument",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_stdin",
"premailer/tests/test_premailer.py::Tests::test_command_line_preserve_style_tags",
"premailer/tests/test_premailer.py::Tests::test_comments_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_css_disable_basic_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_disable_leftover_css",
"premailer/tests/test_premailer.py::Tests::test_css_text",
"premailer/tests/test_premailer.py::Tests::test_css_text_with_only_body_present",
"premailer/tests/test_premailer.py::Tests::test_css_with_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_excluded",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_included",
"premailer/tests/test_premailer.py::Tests::test_disabled_validator",
"premailer/tests/test_premailer.py::Tests::test_doctype",
"premailer/tests/test_premailer.py::Tests::test_empty_style_tag",
"premailer/tests/test_premailer.py::Tests::test_external_links",
"premailer/tests/test_premailer.py::Tests::test_external_links_disallow_network",
"premailer/tests/test_premailer.py::Tests::test_external_links_unfindable",
"premailer/tests/test_premailer.py::Tests::test_external_styles_and_links",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_http",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_https",
"premailer/tests/test_premailer.py::Tests::test_external_styles_with_base_url",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_class_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_element_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_id_over_others",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_important_over_others",
"premailer/tests/test_premailer.py::Tests::test_fontface_selectors_with_no_selectortext",
"premailer/tests/test_premailer.py::Tests::test_ignore_does_not_strip_importants",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_external_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_incorrectly",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_inline_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_style_elements_with_media_attribute",
"premailer/tests/test_premailer.py::Tests::test_include_star_selector",
"premailer/tests/test_premailer.py::Tests::test_inline_important",
"premailer/tests/test_premailer.py::Tests::test_inline_wins_over_external",
"premailer/tests/test_premailer.py::Tests::test_instance_reuse",
"premailer/tests/test_premailer.py::Tests::test_keyframe_selectors",
"premailer/tests/test_premailer.py::Tests::test_kwargs_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_last_child",
"premailer/tests/test_premailer.py::Tests::test_last_child_exclude_pseudo",
"premailer/tests/test_premailer.py::Tests::test_leftover_important",
"premailer/tests/test_premailer.py::Tests::test_links_without_protocol",
"premailer/tests/test_premailer.py::Tests::test_load_external_url",
"premailer/tests/test_premailer.py::Tests::test_load_external_url_404",
"premailer/tests/test_premailer.py::Tests::test_load_external_url_no_insecure_ssl",
"premailer/tests/test_premailer.py::Tests::test_load_external_url_with_insecure_ssl",
"premailer/tests/test_premailer.py::Tests::test_mailto_url",
"premailer/tests/test_premailer.py::Tests::test_mediaquery",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_basic",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_non_trivial",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_class",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_unset",
"premailer/tests/test_premailer.py::Tests::test_mixed_pseudo_selectors",
"premailer/tests/test_premailer.py::Tests::test_multiple_style_elements",
"premailer/tests/test_premailer.py::Tests::test_multithreading",
"premailer/tests/test_premailer.py::Tests::test_parse_style_rules",
"premailer/tests/test_premailer.py::Tests::test_precedence_comparison",
"premailer/tests/test_premailer.py::Tests::test_prefer_inline_to_class",
"premailer/tests/test_premailer.py::Tests::test_pseudo_selectors_without_selector",
"premailer/tests/test_premailer.py::Tests::test_remove_classes",
"premailer/tests/test_premailer.py::Tests::test_remove_unset_properties",
"premailer/tests/test_premailer.py::Tests::test_six_color",
"premailer/tests/test_premailer.py::Tests::test_strip_important",
"premailer/tests/test_premailer.py::Tests::test_style_attribute_specificity",
"premailer/tests/test_premailer.py::Tests::test_style_block_with_external_urls",
"premailer/tests/test_premailer.py::Tests::test_tel_url",
"premailer/tests/test_premailer.py::Tests::test_turnoff_cache_works_as_expected",
"premailer/tests/test_premailer.py::Tests::test_type_test",
"premailer/tests/test_premailer.py::Tests::test_unknown_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_uppercase_margin",
"premailer/tests/test_premailer.py::Tests::test_xml_cdata"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-07 21:04:25+00:00
|
bsd-3-clause
| 4,523 |
|
petl-developers__petl-408
|
diff --git a/docs/changes.rst b/docs/changes.rst
index efa84aa..1a5245d 100644
--- a/docs/changes.rst
+++ b/docs/changes.rst
@@ -13,6 +13,10 @@ Version 1.3.0
`"inline"` (use any exceptions as the output value). The default value is `False` which
maintains compatibility with previous releases. By :user:`bmaggard`, :issue:`460`,
:issue:`406`, :issue:`365`.
+
+* A new function :func:`petl.util.timing.log_progress` has been added, which behaves
+ in a similar way to :func:`petl.util.timing.progress` but writes to a Python logger.
+ By :user:`dusktreader`, :issue:`408`, :issue:`407`.
* Pass through keyword arguments to :func:`xlrd.open_workbook`. By :user:`gjunqueira`,
:issue:`470`, :issue:`473`.
diff --git a/petl/util/__init__.py b/petl/util/__init__.py
index 2f66b48..5eb62fd 100644
--- a/petl/util/__init__.py
+++ b/petl/util/__init__.py
@@ -21,7 +21,7 @@ from petl.util.counting import parsecounter, parsecounts, typecounter, \
from petl.util.materialise import listoflists, listoftuples, tupleoflists, \
tupleoftuples, columns, facetcolumns
-from petl.util.timing import progress, clock
+from petl.util.timing import progress, log_progress, clock
from petl.util.statistics import limits, stats
diff --git a/petl/util/timing.py b/petl/util/timing.py
index c3b564c..5d3a20c 100644
--- a/petl/util/timing.py
+++ b/petl/util/timing.py
@@ -1,6 +1,8 @@
from __future__ import absolute_import, print_function, division
+import abc
+import logging
import sys
import time
@@ -9,9 +11,12 @@ from petl.util.base import Table
from petl.util.statistics import onlinestats
-def progress(table, batchsize=1000, prefix="", out=sys.stderr):
+def progress(table, batchsize=1000, prefix="", out=None):
"""
- Report progress on rows passing through. E.g.::
+ Report progress on rows passing through to a file or file-like object
+ (defaults to sys.stderr)
+
+ E.g.::
>>> import petl as etl
>>> table = etl.dummytable(100000)
@@ -35,16 +40,52 @@ def progress(table, batchsize=1000, prefix="", out=sys.stderr):
return ProgressView(table, batchsize, prefix, out)
+def log_progress(table, batchsize=1000, prefix="", logger=None, level=logging.INFO):
+ """
+ Report progress on rows passing through to a python logger. If logger is
+ none, a new logger will be created that, by default, streams to stdout
+
+ E.g.::
+
+ >>> import petl as etl
+ >>> table = etl.dummytable(100000)
+ >>> table.log_progress(10000).tocsv('example.csv')
+ 10000 rows in 0.13s (78363 row/s); batch in 0.13s (78363 row/s)
+ 20000 rows in 0.22s (91679 row/s); batch in 0.09s (110448 row/s)
+ 30000 rows in 0.31s (96573 row/s); batch in 0.09s (108114 row/s)
+ 40000 rows in 0.40s (99535 row/s); batch in 0.09s (109625 row/s)
+ 50000 rows in 0.49s (101396 row/s); batch in 0.09s (109591 row/s)
+ 60000 rows in 0.59s (102245 row/s); batch in 0.09s (106709 row/s)
+ 70000 rows in 0.68s (103221 row/s); batch in 0.09s (109498 row/s)
+ 80000 rows in 0.77s (103810 row/s); batch in 0.09s (108126 row/s)
+ 90000 rows in 0.90s (99465 row/s); batch in 0.13s (74516 row/s)
+ 100000 rows in 1.02s (98409 row/s); batch in 0.11s (89821 row/s)
+ 100000 rows in 1.02s (98402 row/s); batches in 0.10 +/- 0.02s [0.09-0.13] (100481 +/- 13340 rows/s [74516-110448])
+
+ See also :func:`petl.util.timing.clock`.
+
+ """
+
+ return LoggingProgressView(table, batchsize, prefix, logger, level=level)
+
+
Table.progress = progress
+Table.log_progress = log_progress
-class ProgressView(Table):
+class ProgressViewBase(Table):
+ """
+ Abstract base class for reporting on proecessing status
+ """
- def __init__(self, inner, batchsize, prefix, out):
+ def __init__(self, inner, batchsize, prefix):
self.inner = inner
self.batchsize = batchsize
self.prefix = prefix
- self.out = out
+
+ @abc.abstractmethod
+ def print_message(self, message):
+ pass
def __iter__(self):
start = time.time()
@@ -76,9 +117,7 @@ class ProgressView(Table):
message = self.prefix + \
'%s rows in %.2fs (%s row/s); ' \
'batch in %.2fs (%s row/s)' % v
- print(message, file=self.out)
- if hasattr(self.out, 'flush'):
- self.out.flush()
+ self.print_message(message)
batchstart = batchend
batchtimemean, batchtimevar = \
onlinestats(batchtime, batchn, mean=batchtimemean,
@@ -120,9 +159,43 @@ class ProgressView(Table):
v = (n, elapsedtime, rate)
message = self.prefix + '%s rows in %.2fs (%s row/s)' % v
- print(message, file=self.out)
- if hasattr(self.out, 'flush'):
- self.out.flush()
+ self.print_message(message)
+
+
+class ProgressView(ProgressViewBase):
+ """
+ Reports progress to a file_object like sys.stdout or a file handler
+ """
+
+ def __init__(self, inner, batchsize, prefix, out):
+ if out is None:
+ self.file_object = sys.stderr
+ else:
+ self.file_object = out
+ super(ProgressView, self).__init__(inner, batchsize, prefix)
+
+ def print_message(self, message):
+ print(message, file=self.file_object)
+ if hasattr(self.file_object, 'flush'):
+ self.file_object.flush()
+
+
+class LoggingProgressView(ProgressViewBase):
+ """
+ Reports progress to a logger, log handler, or log adapter
+ """
+
+ def __init__(self, inner, batchsize, prefix, logger, level=logging.INFO):
+ if logger is None:
+ self.logger = logging.getLogger(__name__)
+ self.logger.setLevel(level)
+ else:
+ self.logger = logger
+ self.level = level
+ super(LoggingProgressView, self).__init__(inner, batchsize, prefix)
+
+ def print_message(self, message):
+ self.logger.log(self.level, message)
def clock(table):
|
petl-developers/petl
|
3a61ad9395f603d58d28c534817587c373f0113f
|
diff --git a/petl/test/util/test_timing.py b/petl/test/util/test_timing.py
index 33e262c..418cc88 100644
--- a/petl/test/util/test_timing.py
+++ b/petl/test/util/test_timing.py
@@ -2,7 +2,7 @@ from __future__ import absolute_import, print_function, division
from petl.util.counting import nrows
-from petl.util.timing import progress
+from petl.util.timing import progress, log_progress
def test_progress():
@@ -13,3 +13,10 @@ def test_progress():
('b', 3))
nrows(progress(table))
+def test_log_progress():
+ # make sure log_progress doesn't raise exception
+ table = (('foo', 'bar', 'baz'),
+ ('a', 1, True),
+ ('b', 2, True),
+ ('b', 3))
+ nrows(log_progress(table))
|
Add progress function that uses a logging handler
It would be nice to be able to direct the progress funciton (or a similar function) to a logging handler instead of a i/o stream.
|
0.0
|
3a61ad9395f603d58d28c534817587c373f0113f
|
[
"petl/test/util/test_timing.py::test_progress",
"petl/test/util/test_timing.py::test_log_progress"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-01-20 23:31:32+00:00
|
mit
| 4,524 |
|
petl-developers__petl-634
|
diff --git a/docs/contributing.rst b/docs/contributing.rst
index d1952bf..c604472 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -29,7 +29,7 @@ suite with::
$ pip install -r requirements-tests.txt
$ pytest -v petl
-Currently :mod:`petl` supports Python 2.7, 3.6 up to 3.10
+Currently :mod:`petl` supports Python 2.7, 3.6 up to 3.11
so the tests should pass under all these Python versions.
Dependencies
diff --git a/petl/io/sources.py b/petl/io/sources.py
index 108cdbd..0704f1f 100644
--- a/petl/io/sources.py
+++ b/petl/io/sources.py
@@ -436,9 +436,7 @@ def _get_handler_from(source, handlers):
def _resolve_source_from_arg(source, handlers):
- if source is None:
- return StdinSource()
- elif isinstance(source, string_types):
+ if isinstance(source, string_types):
handler = _get_handler_from(source, handlers)
codec = _get_codec_for(source)
if handler is None:
@@ -464,6 +462,8 @@ def read_source_from_arg(source):
.. versionadded:: 1.4.0
'''
+ if source is None:
+ return StdinSource()
return _resolve_source_from_arg(source, _READERS)
@@ -477,4 +477,6 @@ def write_source_from_arg(source, mode='wb'):
.. versionadded:: 1.4.0
'''
+ if source is None:
+ return StdoutSource()
return _resolve_source_from_arg(source, _WRITERS)
diff --git a/requirements-formats.txt b/requirements-formats.txt
index c365dd9..04d8fc2 100644
--- a/requirements-formats.txt
+++ b/requirements-formats.txt
@@ -5,10 +5,12 @@ intervaltree>=3.0.2
lxml>=4.6.5
openpyxl>=2.6.2
pandas
-tables
Whoosh>=2.7.4
xlrd>=2.0.1
xlwt>=1.3.0
fastavro>=0.24.2 ; python_version >= '3.4'
fastavro==0.24.2 ; python_version < '3.0'
gspread>=3.4.0 ; python_version >= '3.4'
+
+# version 3.7.0 doesn't work yet with python3.11
+tables ; python_version != '3.11'
diff --git a/requirements-optional.txt b/requirements-optional.txt
index b8375a8..b46163e 100644
--- a/requirements-optional.txt
+++ b/requirements-optional.txt
@@ -1,7 +1,14 @@
-# packages bellow need complex local setup
-# throubleshooting:
-# 1. define the following variable before running pip:
-# $ export DISABLE_BLOSC_AVX2=1
-# 2. pip install --prefer-binary bcolz
+# Packages bellow need complex local setup #
+# Also check: .github/workflows/test-changes.yml
+
+# Throubleshooting:
+# 1. $ export DISABLE_BLOSC_AVX2=1
+# 2. $ brew install c-blosc
+
blosc ; python_version >= '3.7'
-bcolz ; python_version >= '3.7'
+
+# Throubleshooting:
+# 1. pip install --prefer-binary -r requirements-optional.txt
+# 2. pip install --prefer-binary bcolz
+
+bcolz ; python_version >= '3.7' and python_version < '3.10'
diff --git a/setup.py b/setup.py
index 47dbe09..5050a6f 100644
--- a/setup.py
+++ b/setup.py
@@ -47,6 +47,8 @@ setup(
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
+ 'Programming Language :: Python :: 3.11',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
diff --git a/tox.ini b/tox.ini
index 683280c..5e5cf38 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,14 +4,14 @@
# and then run "tox" from this directory.
[tox]
-envlist = py27, py36, py37, py38, py39, py310, {py36,py37,py38,py39,py310}-docs
+envlist = py27, py36, py37, py38, py39, py310, py311, {py36,py37,py38,py39,py310,py311}-docs
[testenv]
# get stable output for unordered types
setenv =
PYTHONHASHSEED = 42
py27: PY_MAJOR_VERSION = py2
- py36,py37,py38,py39,py310: PY_MAJOR_VERSION = py3
+ py36,py37,py38,py39,py310,py311: PY_MAJOR_VERSION = py3
commands =
pytest --cov=petl petl
coverage report -m
@@ -19,7 +19,7 @@ deps =
-rrequirements-tests.txt
-rrequirements-formats.txt
-[testenv:{py36,py37,py38,py39}-docs]
+[testenv:{py36,py37,py38,py39,py310,py311}-docs]
# build documentation under similar environment to readthedocs
changedir = docs
deps =
@@ -27,9 +27,9 @@ deps =
commands =
sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html
-[testenv:{py36,py37,py38,py39,py310}-doctest]
+[testenv:{py36,py37,py38,py39,py310,py311}-doctest]
commands =
- py36,py37,py38,py39,py310: pytest --doctest-modules --cov=petl petl
+ py36,py37,py38,py39,py310,py311: pytest --doctest-modules --cov=petl petl
[testenv:{py36,py37,py38,py39}-dochtml]
changedir = docs
deps =
|
petl-developers/petl
|
9f065ba647a86b8fcc183ed7c752d347382a8707
|
diff --git a/.github/workflows/test-changes.yml b/.github/workflows/test-changes.yml
index bce0946..e2720a9 100644
--- a/.github/workflows/test-changes.yml
+++ b/.github/workflows/test-changes.yml
@@ -27,7 +27,7 @@ jobs:
fail-fast: false
matrix:
os: [ "ubuntu-latest", "windows-latest", "macos-latest" ]
- python: ['2.7', '3.6', '3.7', '3.8', '3.9', '3.10']
+ python: ['2.7', '3.6', '3.7', '3.8', '3.9', '3.10', '3.11']
runs-on: "${{ matrix.os }}"
diff --git a/petl/test/io/test_sources.py b/petl/test/io/test_sources.py
index f1e1baf..c2cbff4 100644
--- a/petl/test/io/test_sources.py
+++ b/petl/test/io/test_sources.py
@@ -92,6 +92,17 @@ def test_stdoutsource():
etl.topickle(tbl, StdoutSource())
+def test_stdoutsource_none(capfd):
+
+ tbl = [('foo', 'bar'), ('a', 1), ('b', 2)]
+ etl.tocsv(tbl, encoding='ascii')
+ captured = capfd.readouterr()
+ outp = captured.out
+ # TODO: capfd works on vscode but not in console/tox
+ if outp:
+ assert outp in ( 'foo,bar\r\na,1\r\nb,2\r\n' , 'foo,bar\na,1\nb,2\n' )
+
+
def test_stdoutsource_unicode():
tbl = [('foo', 'bar'),
|
tocsv(source=None) does not output to stdout
#### Minimal, reproducible code sample, a copy-pastable example if possible
```python
petl.wrap([...]).tocsv()
```
#### Problem description
The documentation says
```
Load - writing tables to files and databases
The following functions write data from a table to a file-like source or database. For functions that accept a source argument, if the source argument is None or a string it is interpreted as follows:
None - write to stdout
...
```
I've never found this to work, and always work around it.
I looked into it this time briefly, and it looks like a blatant oversight.
The `_resolve_source_from_arg` function gets called from both the read_source_from_arg() and write_source_from_arg() functions below it and does this:
```
def _resolve_source_from_arg(source, handlers):
if source is None:
return StdinSource()
...
```
You need to move the conditional within each of the two functions that call it (simple solution).
Probably should be prioritized high... this looks pretty bad for newcomers to the library, as this is library one of the first things someone might do.
#### Version and installation information
1.7.11
|
0.0
|
9f065ba647a86b8fcc183ed7c752d347382a8707
|
[
"petl/test/io/test_sources.py::test_stdoutsource_none"
] |
[
"petl/test/io/test_sources.py::test_memorysource",
"petl/test/io/test_sources.py::test_memorysource_2",
"petl/test/io/test_sources.py::test_popensource",
"petl/test/io/test_sources.py::test_zipsource",
"petl/test/io/test_sources.py::test_stdoutsource",
"petl/test/io/test_sources.py::test_stdoutsource_unicode",
"petl/test/io/test_sources.py::test_gzipsource",
"petl/test/io/test_sources.py::test_bzip2source"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-22 17:51:30+00:00
|
mit
| 4,525 |
|
petl-developers__petl-656
|
diff --git a/docs/changes.rst b/docs/changes.rst
index 3d83bfd..5ff55eb 100644
--- a/docs/changes.rst
+++ b/docs/changes.rst
@@ -1,6 +1,15 @@
Changes
=======
+Version 1.7.15
+--------------
+
+* Add unit tests for randomtable, dummytable, and their supporting functions and classes.
+ By :user:`bmos`, :issue:`657`.
+
+* Fix: DeprecationWarning: Seeding based on hashing is deprecated since Python 3.9 and will be removed in a subsequent version.
+ By :user:`bmos`, :issue:`657`.
+
Version 1.7.14
--------------
diff --git a/petl/util/random.py b/petl/util/random.py
index 94fa758..7485abf 100644
--- a/petl/util/random.py
+++ b/petl/util/random.py
@@ -1,15 +1,24 @@
from __future__ import absolute_import, print_function, division
-
-import datetime
-import random
+import hashlib
+import random as pyrandom
import time
from collections import OrderedDict
from functools import partial
+
from petl.compat import xrange, text_type
+from petl.util.base import Table
-from petl.util.base import Table
+def randomseed():
+ """
+ Obtain the hex digest of a sha256 hash of the
+ current epoch time in nanoseconds.
+ """
+
+ time_ns = str(time.time()).encode()
+ hash_time = hashlib.sha256(time_ns).hexdigest()
+ return hash_time
def randomtable(numflds=5, numrows=100, wait=0, seed=None):
@@ -36,9 +45,11 @@ def randomtable(numflds=5, numrows=100, wait=0, seed=None):
| 0.026535969683863625 | 0.1988376506866485 | 0.6498844377795232 |
+----------------------+----------------------+---------------------+
...
+ <BLANKLINE>
Note that the data are generated on the fly and are not stored in memory,
so this function can be used to simulate very large tables.
+ The only supported seed types are: None, int, float, str, bytes, and bytearray.
"""
@@ -46,27 +57,25 @@ def randomtable(numflds=5, numrows=100, wait=0, seed=None):
class RandomTable(Table):
-
def __init__(self, numflds=5, numrows=100, wait=0, seed=None):
self.numflds = numflds
self.numrows = numrows
self.wait = wait
if seed is None:
- self.seed = datetime.datetime.now()
+ self.seed = randomseed()
else:
self.seed = seed
def __iter__(self):
-
nf = self.numflds
nr = self.numrows
seed = self.seed
# N.B., we want this to be stable, i.e., same data each time
- random.seed(seed)
+ pyrandom.seed(seed)
# construct fields
- flds = ['f%s' % n for n in range(nf)]
+ flds = ["f%s" % n for n in range(nf)]
yield tuple(flds)
# construct data rows
@@ -74,18 +83,22 @@ class RandomTable(Table):
# artificial delay
if self.wait:
time.sleep(self.wait)
- yield tuple(random.random() for n in range(nf))
+ yield tuple(pyrandom.random() for n in range(nf))
def reseed(self):
- self.seed = datetime.datetime.now()
-
-
-def dummytable(numrows=100,
- fields=(('foo', partial(random.randint, 0, 100)),
- ('bar', partial(random.choice, ('apples', 'pears',
- 'bananas', 'oranges'))),
- ('baz', random.random)),
- wait=0, seed=None):
+ self.seed = randomseed()
+
+
+def dummytable(
+ numrows=100,
+ fields=(
+ ('foo', partial(pyrandom.randint, 0, 100)),
+ ('bar', partial(pyrandom.choice, ('apples', 'pears', 'bananas', 'oranges'))),
+ ('baz', pyrandom.random),
+ ),
+ wait=0,
+ seed=None,
+):
"""
Construct a table with dummy data. Use `numrows` to specify the number of
rows. Set `wait` to a float greater than zero to simulate a delay on each
@@ -108,14 +121,13 @@ def dummytable(numrows=100,
| 4 | 'apples' | 0.09369523986159245 |
+-----+----------+----------------------+
...
+ <BLANKLINE>
- >>> # customise fields
- ... import random
+ >>> import random as pyrandom
>>> from functools import partial
- >>> fields = [('foo', random.random),
- ... ('bar', partial(random.randint, 0, 500)),
- ... ('baz', partial(random.choice,
- ... ['chocolate', 'strawberry', 'vanilla']))]
+ >>> fields = [('foo', pyrandom.random),
+ ... ('bar', partial(pyrandom.randint, 0, 500)),
+ ... ('baz', partial(pyrandom.choice, ['chocolate', 'strawberry', 'vanilla']))]
>>> table2 = etl.dummytable(100, fields=fields, seed=42)
>>> table2
+---------------------+-----+-------------+
@@ -132,12 +144,19 @@ def dummytable(numrows=100,
| 0.4219218196852704 | 15 | 'chocolate' |
+---------------------+-----+-------------+
...
+ <BLANKLINE>
+
+ >>> table3_1 = etl.dummytable(50)
+ >>> table3_2 = etl.dummytable(100)
+ >>> table3_1[5] == table3_2[5]
+ False
Data generation functions can be specified via the `fields` keyword
argument.
Note that the data are generated on the fly and are not stored in memory,
so this function can be used to simulate very large tables.
+ The only supported seed types are: None, int, float, str, bytes, and bytearray.
"""
@@ -145,7 +164,6 @@ def dummytable(numrows=100,
class DummyTable(Table):
-
def __init__(self, numrows=100, fields=None, wait=0, seed=None):
self.numrows = numrows
self.wait = wait
@@ -154,7 +172,7 @@ class DummyTable(Table):
else:
self.fields = OrderedDict(fields)
if seed is None:
- self.seed = datetime.datetime.now()
+ self.seed = randomseed()
else:
self.seed = seed
@@ -167,7 +185,7 @@ class DummyTable(Table):
fields = self.fields.copy()
# N.B., we want this to be stable, i.e., same data each time
- random.seed(seed)
+ pyrandom.seed(seed)
# construct header row
hdr = tuple(text_type(f) for f in fields.keys())
@@ -181,4 +199,4 @@ class DummyTable(Table):
yield tuple(fields[f]() for f in fields)
def reseed(self):
- self.seed = datetime.datetime.now()
+ self.seed = randomseed()
|
petl-developers/petl
|
e829532e2ed350d00b96680d2d6774dec4a7f2e0
|
diff --git a/petl/test/util/test_random.py b/petl/test/util/test_random.py
new file mode 100644
index 0000000..7c4f420
--- /dev/null
+++ b/petl/test/util/test_random.py
@@ -0,0 +1,93 @@
+import random as pyrandom
+import time
+from functools import partial
+
+from petl.util.random import randomseed, randomtable, RandomTable, dummytable, DummyTable
+
+
+def test_randomseed():
+ """
+ Ensure that randomseed provides a non-empty string that changes.
+ """
+ seed_1 = randomseed()
+ time.sleep(1)
+ seed_2 = randomseed()
+
+ assert isinstance(seed_1, str)
+ assert seed_1 != ""
+ assert seed_1 != seed_2
+
+
+def test_randomtable():
+ """
+ Ensure that randomtable provides a table with the right number of rows and columns.
+ """
+ columns, rows = 3, 10
+ table = randomtable(columns, rows)
+
+ assert len(table[0]) == columns
+ assert len(table) == rows + 1
+
+
+def test_randomtable_class():
+ """
+ Ensure that RandomTable provides a table with the right number of rows and columns.
+ """
+ columns, rows = 4, 60
+ table = RandomTable(numflds=columns, numrows=rows)
+
+ assert len(table[0]) == columns
+ assert len(table) == rows + 1
+
+
+def test_dummytable_custom_fields():
+ """
+ Ensure that dummytable provides a table with the right number of rows
+ and that it accepts and uses custom column names provided.
+ """
+ columns = (
+ ('count', partial(pyrandom.randint, 0, 100)),
+ ('pet', partial(pyrandom.choice, ['dog', 'cat', 'cow', ])),
+ ('color', partial(pyrandom.choice, ['yellow', 'orange', 'brown'])),
+ ('value', pyrandom.random),
+ )
+ rows = 35
+
+ table = dummytable(numrows=rows, fields=columns)
+ assert table[0] == ('count', 'pet', 'color', 'value')
+ assert len(table) == rows + 1
+
+
+def test_dummytable_no_seed():
+ """
+ Ensure that dummytable provides a table with the right number of rows
+ and columns when not provided with a seed.
+ """
+ rows = 35
+
+ table = dummytable(numrows=rows)
+ assert len(table[0]) == 3
+ assert len(table) == rows + 1
+
+
+def test_dummytable_int_seed():
+ """
+ Ensure that dummytable provides a table with the right number of rows
+ and columns when provided with an integer as a seed.
+ """
+ rows = 35
+ seed = 42
+ table = dummytable(numrows=rows, seed=seed)
+ assert len(table[0]) == 3
+ assert len(table) == rows + 1
+
+
+def test_dummytable_class():
+ """
+ Ensure that DummyTable provides a table with the right number of rows
+ and columns.
+ """
+ rows = 70
+ table = DummyTable(numrows=rows)
+
+ assert len(table) == rows + 1
|
DeprecationWarning / TypeError: Seeding based on hashing is deprecated/removed
## Problem description
### What's happenning
When creating a dummytable, the warning is emitted:
> DeprecationWarning: Seeding based on hashing is deprecated since Python 3.9 and will be removed in a subsequent version. The only supported seed types are: None, int, float, str, bytes, and bytearray.
Furthermore, when using the same code on 3.11 or 3.12, I get this error instead:
```
self = <random.Random object at 0x564b245ce8f0>
a = datetime.datetime(2024, 3, 11, 1, 27, 10, 858705), version = 2
def seed(self, a=None, version=2):
"""Initialize internal state from a seed.
The only supported seed types are None, int, float,
str, bytes, and bytearray.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
If *a* is an int, all bits are used.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1 (provided for reproducing random
sequences from older versions of Python), the algorithm for str and
bytes generates a narrower range of seeds.
"""
if version == 1 and isinstance(a, (str, bytes)):
a = a.decode('latin-1') if isinstance(a, bytes) else a
x = ord(a[0]) << 7 if a else 0
for c in map(ord, a):
x = ((1000003 * x) ^ c) & 0xFFFFFFFFFFFFFFFF
x ^= len(a)
a = -2 if x == -1 else x
elif version == 2 and isinstance(a, (str, bytes, bytearray)):
if isinstance(a, str):
a = a.encode()
a = int.from_bytes(a + _sha512(a).digest())
elif not isinstance(a, (type(None), int, float, str, bytes, bytearray)):
> raise TypeError('The only supported seed types are: None,\n'
'int, float, str, bytes, and bytearray.')
E TypeError: The only supported seed types are: None,
E int, float, str, bytes, and bytearray.
/opt/hostedtoolcache/Python/3.11.8/x64/lib/python3.11/random.py:160: TypeError
```
### Expected behavior
The dummytable should be created with no warning or error.
## Scenario for reprodution
### Reproducible test case
Please provide a minimal, reproducible code sample, a copy-pastable example if possible:
_I haven't been able to make one yet, but changing this in the installed copy of petl in my .venv folder resolves the warning._
### Version and installation information
Please provide the following:
- Value of ``petl.__version__``: **1.7.14**
- Version information for any third-party package dependencies that are relevant
- Version of Python interpreter: **3.10**
- Operating system (Linux/Windows/Mac): **Linux Pop!_OS 22.04 LTS**
- How petl was installed (e.g., "using pip into virtual environment", or "using conda"): **uv pip install .venv**
Also, if you think it might be relevant, please provide the output from ``pip freeze`` or
``conda env export`` depending on which was used to install petl.
### Additional context
Add any other context about the problem here.
Also, feel free to remove all sections and text that aren't relevant.
|
0.0
|
e829532e2ed350d00b96680d2d6774dec4a7f2e0
|
[
"petl/test/util/test_random.py::test_randomseed",
"petl/test/util/test_random.py::test_randomtable",
"petl/test/util/test_random.py::test_randomtable_class",
"petl/test/util/test_random.py::test_dummytable_custom_fields",
"petl/test/util/test_random.py::test_dummytable_no_seed",
"petl/test/util/test_random.py::test_dummytable_int_seed",
"petl/test/util/test_random.py::test_dummytable_class"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-11 00:30:53+00:00
|
mit
| 4,526 |
|
petrobras__ross-925
|
diff --git a/ross/__init__.py b/ross/__init__.py
index fd0f04f..7a42d0d 100644
--- a/ross/__init__.py
+++ b/ross/__init__.py
@@ -8,6 +8,7 @@ from .defects import *
from .disk_element import *
from .materials import *
from .point_mass import *
+from .probe import *
from .results import *
from .rotor_assembly import *
from .shaft_element import *
diff --git a/ross/probe.py b/ross/probe.py
new file mode 100644
index 0000000..78ecb9e
--- /dev/null
+++ b/ross/probe.py
@@ -0,0 +1,49 @@
+from ross.units import Q_, check_units
+
+
+class Probe:
+ """Class of a probe.
+
+ This class will create a probe object to be used in the rotor model.
+ The probe is used to measure the response of the rotor at a specific
+ location and orientation.
+
+ Parameters
+ ----------
+ node : int
+ Indicate the node where the probe is located.
+ angle : float, pint.Quantity
+ Probe orientation angle about the shaft (rad).
+ tag : str, optional
+ A tag to name the element.
+
+ Example
+ -------
+ >>> import ross as rs
+ >>> probe1 = rs.Probe(10, Q_(45, "degree"), "Probe Drive End - X")
+ >>> probe1.node
+ 10
+ >>> probe1.angle
+ 0.7853981633974483
+ """
+
+ @check_units
+ def __init__(self, node, angle, tag=None):
+ self.node = node
+ self.angle = angle
+ if tag is None:
+ self.tag = f"Probe - Node {self.node}, Angle {self.angle}"
+ else:
+ self.tag = tag
+
+ @property
+ def info(self):
+ return self.node, self.angle
+
+ def __str__(self):
+ return (
+ f"Probe {self.tag}"
+ f'\n{20*"-"}'
+ f"\nNode location : {self.node}"
+ f"\nProbe orientation angle (rad) : {self.angle}"
+ )
diff --git a/ross/results.py b/ross/results.py
index e653403..129042b 100644
--- a/ross/results.py
+++ b/ross/results.py
@@ -7,6 +7,7 @@ import inspect
from abc import ABC
from collections.abc import Iterable
from pathlib import Path
+from warnings import warn
import numpy as np
import pandas as pd
@@ -2083,16 +2084,7 @@ class ForcedResponseResults(Results):
Parameters
----------
probe : list
- List with tuples (node, orientation angle, tag).
-
- node : int -> Indicate the node where the probe is located.
-
- orientation : float -> Probe orientation angle about the shaft.
- The 0 refers to +X direction.
- The strings 'major' and 'minor' can also be used to reference the major
- and minor axis.
-
- tag : str, optional -> Probe tag to be add a DataFrame column title.
+ List with rs.Probe objects.
probe_units : str, option
Units for probe orientation.
Default is "rad".
@@ -2135,24 +2127,39 @@ class ForcedResponseResults(Results):
for i, p in enumerate(probe):
amplitude = []
for speed_idx in range(len(self.speed_range)):
+ # first try to get the angle from the probe object
try:
- angle = Q_(p[1], probe_units).to("rad").m
- except TypeError:
- angle = p[1]
+ angle = p.angle
+ node = p.node
+ # if it is a tuple, warn the user that the use of tuples is deprecated
+ except AttributeError:
+ try:
+ angle = Q_(p[1], probe_units).to("rad").m
+ warn(
+ "The use of tuples in the probe argument is deprecated. Use the Probe class instead.",
+ DeprecationWarning,
+ )
+ node = p[0]
+ except TypeError:
+ angle = p[1]
+ node = p[0]
ru_e, rv_e = response[:, speed_idx][
- self.rotor.number_dof * p[0] : self.rotor.number_dof * p[0] + 2
+ self.rotor.number_dof * node : self.rotor.number_dof * node + 2
]
orbit = Orbit(
- node=p[0], node_pos=self.rotor.nodes_pos[p[0]], ru_e=ru_e, rv_e=rv_e
+ node=node, node_pos=self.rotor.nodes_pos[node], ru_e=ru_e, rv_e=rv_e
)
amp, phase = orbit.calculate_amplitude(angle=angle)
amplitude.append(amp)
try:
- probe_tag = p[2]
- except IndexError:
- probe_tag = f"Probe {i+1} - Node {p[0]}"
+ probe_tag = p.tag
+ except AttributeError:
+ try:
+ probe_tag = p[2]
+ except IndexError:
+ probe_tag = f"Probe {i+1} - Node {p[0]}"
data[probe_tag] = Q_(amplitude, base_unit).to(amplitude_units).m
@@ -2173,16 +2180,7 @@ class ForcedResponseResults(Results):
Parameters
----------
probe : list
- List with tuples (node, orientation angle, tag).
-
- node : int -> Indicate the node where the probe is located.
-
- orientation : float -> Probe orientation angle about the shaft.
- The 0 refers to +X direction.
- The strings 'major' and 'minor' can also be used to reference the major
- and minor axis.
-
- tag : str, optional -> Probe tag to be add a DataFrame column title.
+ List with rs.Probe objects.
probe_units : str, option
Units for probe orientation.
Default is "rad".
@@ -2228,24 +2226,39 @@ class ForcedResponseResults(Results):
for i, p in enumerate(probe):
phase_values = []
for speed_idx in range(len(self.speed_range)):
+ # first try to get the angle from the probe object
try:
- angle = Q_(p[1], probe_units).to("rad").m
- except TypeError:
- angle = p[1]
+ angle = p.angle
+ node = p.node
+ # if it is a tuple, warn the user that the use of tuples is deprecated
+ except AttributeError:
+ try:
+ angle = Q_(p[1], probe_units).to("rad").m
+ warn(
+ "The use of tuples in the probe argument is deprecated. Use the Probe class instead.",
+ DeprecationWarning,
+ )
+ node = p[0]
+ except TypeError:
+ angle = p[1]
+ node = p[0]
ru_e, rv_e = response[:, speed_idx][
- self.rotor.number_dof * p[0] : self.rotor.number_dof * p[0] + 2
+ self.rotor.number_dof * node : self.rotor.number_dof * node + 2
]
orbit = Orbit(
- node=p[0], node_pos=self.rotor.nodes_pos[p[0]], ru_e=ru_e, rv_e=rv_e
+ node=node, node_pos=self.rotor.nodes_pos[node], ru_e=ru_e, rv_e=rv_e
)
amp, phase = orbit.calculate_amplitude(angle=angle)
phase_values.append(phase)
try:
- probe_tag = p[2]
- except IndexError:
- probe_tag = f"Probe {i+1} - Node {p[0]}"
+ probe_tag = p.tag
+ except AttributeError:
+ try:
+ probe_tag = p[2]
+ except IndexError:
+ probe_tag = f"Probe {i+1} - Node {p[0]}"
data[probe_tag] = Q_(phase_values, "rad").to(phase_units).m
@@ -2267,17 +2280,8 @@ class ForcedResponseResults(Results):
Parameters
----------
probe : list
- List with tuples (node, orientation angle, tag).
-
- node : int -> Indicate the node where the probe is located.
-
- orientation : float -> Probe orientation angle about the shaft.
- The 0 refers to +X direction.
- The strings 'major' and 'minor' can also be used to reference the major
- and minor axis.
-
- tag : str, optional -> Probe tag to be add a DataFrame column title.
- probe_units : str, option
+ List with rs.Probe objects.
+ probe_units : str, optional
Units for probe orientation.
Default is "rad".
frequency_units : str, optional
@@ -2352,17 +2356,8 @@ class ForcedResponseResults(Results):
Parameters
----------
probe : list
- List with tuples (node, orientation angle, tag).
-
- node : int -> Indicate the node where the probe is located.
-
- orientation : float -> Probe orientation angle about the shaft.
- The 0 refers to +X direction.
- The strings 'major' and 'minor' can also be used to reference the major
- and minor axis.
-
- tag : str, optional -> Probe tag to be add a DataFrame column title.
- probe_units : str, option
+ List with rs.Probe objects.
+ probe_units : str, optional
Units for probe orientation.
Default is "rad".
frequency_units : str, optional
@@ -2440,17 +2435,8 @@ class ForcedResponseResults(Results):
Parameters
----------
probe : list
- List with tuples (node, orientation angle, tag).
-
- node : int -> Indicate the node where the probe is located.
-
- orientation : float -> Probe orientation angle about the shaft.
- The 0 refers to +X direction.
- The strings 'major' and 'minor' can also be used to reference the major
- and minor axis.
-
- tag : str, optional -> Probe tag to be add a DataFrame column title.
- probe_units : str, option
+ List with rs.Probe objects.
+ probe_units : str, optional
Units for probe orientation.
Default is "rad".
frequency_units : str, optional
@@ -2548,17 +2534,8 @@ class ForcedResponseResults(Results):
Parameters
----------
probe : list
- List with tuples (node, orientation angle, tag).
-
- node : int -> Indicate the node where the probe is located.
-
- orientation : float -> Probe orientation angle about the shaft.
- The 0 refers to +X direction.
- The strings 'major' and 'minor' can also be used to reference the major
- and minor axis.
-
- tag : str, optional -> Probe tag to be add a DataFrame column title.
- probe_units : str, option
+ List with rs.Probe objects.
+ probe_units : str, optional
Units for probe orientation.
Default is "rad".
frequency_units : str, optional
@@ -4183,16 +4160,7 @@ class TimeResponseResults(Results):
Parameters
----------
probe : list
- List with tuples (node, orientation angle, tag).
-
- node : int -> Indicate the node where the probe is located.
-
- orientation : float -> Probe orientation angle about the shaft.
- The 0 refers to +X direction.
- The strings 'major' and 'minor' can also be used to reference the major
- and minor axis.
-
- tag : str, optional -> Probe tag to be add a DataFrame column title.
+ List with rs.Probe objects.
probe_units : str, option
Units for probe orientation.
Default is "rad".
|
petrobras/ross
|
e0f441244a79142741f734afa111bebd87002623
|
diff --git a/ross/tests/test_probe.py b/ross/tests/test_probe.py
new file mode 100644
index 0000000..f8028c2
--- /dev/null
+++ b/ross/tests/test_probe.py
@@ -0,0 +1,21 @@
+import pytest
+from numpy.testing import assert_allclose
+
+from ross.probe import Probe
+from ross.units import Q_
+
+
[email protected]
+def probe():
+ return Probe(10, Q_(45, "degree"), "V1")
+
+
+def test_parameters(probe):
+ assert_allclose(probe.node, 10)
+ assert_allclose(probe.angle, 0.7853981633974483)
+
+
+def test_info(probe):
+ node, angle = probe.info
+ assert_allclose(node, 10)
+ assert_allclose(angle, 0.7853981633974483)
diff --git a/ross/tests/test_rotor_assembly.py b/ross/tests/test_rotor_assembly.py
index 893c803..5796410 100644
--- a/ross/tests/test_rotor_assembly.py
+++ b/ross/tests/test_rotor_assembly.py
@@ -10,6 +10,7 @@ from ross.bearing_seal_element import *
from ross.disk_element import *
from ross.materials import Material, steel
from ross.point_mass import *
+from ross.probe import Probe
from ross.rotor_assembly import *
from ross.shaft_element import *
from ross.units import Q_
@@ -1434,17 +1435,33 @@ def test_plot_mode(rotor7):
assert_allclose(fig.data[-3]["z"][:5], expected_z, rtol=1e-5)
-def test_unbalance(rotor7):
- unb = rotor7.run_unbalance_response(
+def test_unbalance(rotor3):
+ unb = rotor3.run_unbalance_response(
node=0, unbalance_magnitude=1, unbalance_phase=0, frequency=[50, 100]
)
- amplitude_expected = np.array([0.00274, 0.003526])
+ amplitude_expected = np.array([0.003065, 0.004169])
data = unb.data_magnitude(probe=[(0, 45)], probe_units="deg")
assert_allclose(data["Probe 1 - Node 0"], amplitude_expected, rtol=1e-4)
+ data = unb.data_magnitude(probe=[Probe(0, Q_(45, "deg"), tag="Probe 1 - Node 0")])
+ assert_allclose(data["Probe 1 - Node 0"], amplitude_expected, rtol=1e-4)
- phase_expected = np.array([0.730209, 0.545276])
+ phase_expected = np.array([0.785398, 0.785398])
data = unb.data_phase(probe=[(0, 45)], probe_units="deg")
assert_allclose(data["Probe 1 - Node 0"], phase_expected, rtol=1e-4)
+ data = unb.data_phase(probe=[Probe(0, Q_(45, "deg"), tag="Probe 1 - Node 0")])
+ assert_allclose(data["Probe 1 - Node 0"], phase_expected, rtol=1e-4)
+
+ amplitude_expected = np.array([0.003526, 0.005518])
+ data = unb.data_magnitude(probe=[(0, "major")])
+ assert_allclose(data["Probe 1 - Node 0"], amplitude_expected, rtol=1e-4)
+ data = unb.data_magnitude(probe=[Probe(0, "major", tag="Probe 1 - Node 0")])
+ assert_allclose(data["Probe 1 - Node 0"], amplitude_expected, rtol=1e-4)
+
+ phase_expected = np.array([1.5742, 1.573571])
+ data = unb.data_phase(probe=[(0, "major")], probe_units="deg")
+ assert_allclose(data["Probe 1 - Node 0"], phase_expected, rtol=1e-4)
+ data = unb.data_phase(probe=[Probe(0, "major", tag="Probe 1 - Node 0")])
+ assert_allclose(data["Probe 1 - Node 0"], phase_expected, rtol=1e-4)
def test_deflected_shape(rotor7):
|
Add Probe class
Currently we use list of tuples to add probes to our results.
The problem is that, contrary to other parts of ROSS, we are not able to use pint.Quantity to the probe orientation.
Instead, we have to declare the unit in a separate argument:
```
probe : list
List with tuples (node, orientation angle, tag).
node : int -> Indicate the node where the probe is located.
orientation : float -> Probe orientation angle about the shaft.
The 0 refers to +X direction.
The strings 'major' and 'minor' can also be used to reference the major
and minor axis.
tag : str, optional -> Probe tag to be add a DataFrame column title.
probe_units : str, option
Units for probe orientation.
Default is "rad".
```
With a probe class we would be able to do something like:
```python
probe_la = rs.Probe(15, Q_(45, 'deg'))
```
Another possibility would be to add filter (1x etc.) to the probe.
|
0.0
|
e0f441244a79142741f734afa111bebd87002623
|
[
"ross/tests/test_probe.py::test_parameters",
"ross/tests/test_probe.py::test_info",
"ross/tests/test_rotor_assembly.py::test_index_eigenvalues_rotor1",
"ross/tests/test_rotor_assembly.py::test_mass_matrix_rotor1",
"ross/tests/test_rotor_assembly.py::test_raise_if_element_outside_shaft",
"ross/tests/test_rotor_assembly.py::test_rotor_equality",
"ross/tests/test_rotor_assembly.py::test_mass_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a0_0_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a0_1_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a1_0_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a1_1_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_evals_sorted_rotor2",
"ross/tests/test_rotor_assembly.py::test_rotor_attributes",
"ross/tests/test_rotor_assembly.py::test_kappa_rotor3",
"ross/tests/test_rotor_assembly.py::test_kappa_mode_rotor3",
"ross/tests/test_rotor_assembly.py::test_evals_rotor3_rotor4",
"ross/tests/test_rotor_assembly.py::test_campbell",
"ross/tests/test_rotor_assembly.py::test_freq_response_w_force",
"ross/tests/test_rotor_assembly.py::test_mesh_convergence",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor3",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor5",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor6",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor9",
"ross/tests/test_rotor_assembly.py::test_static_analysis_high_stiffness",
"ross/tests/test_rotor_assembly.py::test_static_bearing_with_disks",
"ross/tests/test_rotor_assembly.py::test_run_critical_speed",
"ross/tests/test_rotor_assembly.py::test_coaxial_rotor_assembly",
"ross/tests/test_rotor_assembly.py::test_from_section",
"ross/tests/test_rotor_assembly.py::test_whirl_values",
"ross/tests/test_rotor_assembly.py::test_kappa_mode",
"ross/tests/test_rotor_assembly.py::test_kappa_axes_values",
"ross/tests/test_rotor_assembly.py::test_plot_mode",
"ross/tests/test_rotor_assembly.py::test_unbalance",
"ross/tests/test_rotor_assembly.py::test_deflected_shape",
"ross/tests/test_rotor_assembly.py::test_global_index",
"ross/tests/test_rotor_assembly.py::test_distinct_dof_elements_error",
"ross/tests/test_rotor_assembly.py::test_modal_6dof",
"ross/tests/test_rotor_assembly.py::test_ucs_calc_rotor2",
"ross/tests/test_rotor_assembly.py::test_ucs_calc",
"ross/tests/test_rotor_assembly.py::test_ucs_rotor9",
"ross/tests/test_rotor_assembly.py::test_pickle",
"ross/tests/test_rotor_assembly.py::test_save_load",
"ross/tests/test_rotor_assembly.py::test_plot_rotor",
"ross/tests/test_rotor_assembly.py::test_plot_rotor_without_disk",
"ross/tests/test_rotor_assembly.py::test_axial_force",
"ross/tests/test_rotor_assembly.py::test_torque",
"ross/tests/test_rotor_assembly.py::test_rotor_conical_frequencies"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-17 13:33:20+00:00
|
apache-2.0
| 4,527 |
|
petrobras__ross-966
|
diff --git a/ross/results.py b/ross/results.py
index 129042b..4f23647 100644
--- a/ross/results.py
+++ b/ross/results.py
@@ -1129,6 +1129,7 @@ class ModalResults(Results):
self,
mode=None,
fig=None,
+ orientation="major",
frequency_type="wd",
title=None,
length_units="m",
@@ -1144,6 +1145,9 @@ class ModalResults(Results):
The n'th vibration mode
fig : Plotly graph_objects.Figure()
The figure object with the plot.
+ orientation : str, optional
+ Orientation can be 'major', 'x' or 'y'.
+ Default is 'major' to display the major axis.
frequency_type : str, optional
"wd" calculates the damped natural frequencies.
"wn" calculates the undamped natural frequencies.
@@ -1187,7 +1191,7 @@ class ModalResults(Results):
}
shape = self.shapes[mode]
- fig = shape.plot_2d(fig=fig)
+ fig = shape.plot_2d(fig=fig, orientation=orientation)
if title is None:
title = ""
|
petrobras/ross
|
3895ca66b22f7e859956a732bba72a3846d1d03d
|
diff --git a/ross/tests/test_rotor_assembly.py b/ross/tests/test_rotor_assembly.py
index 5796410..73e1ce2 100644
--- a/ross/tests/test_rotor_assembly.py
+++ b/ross/tests/test_rotor_assembly.py
@@ -329,6 +329,48 @@ def rotor3():
return Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])
+def test_modal_fig_orientation(rotor3):
+ modal1 = rotor3.run_modal(Q_(900, "RPM"))
+ fig1 = modal1.plot_mode_2d(1, orientation="major")
+ data_major = fig1.data[0].y
+
+ # fmt: off
+ expected_data_major = np.array([
+ 0.3330699 , 0.41684076, 0.49947039, 0.5796177 , 0.65594162,
+ 0.65594162, 0.72732014, 0.79268256, 0.85076468, 0.90030229,
+ 0.90030229, 0.9402937 , 0.97041024, 0.99039723, 1. ,
+ 1. , 0.99901483, 0.98731591, 0.9647654 , 0.93122548,
+ 0.93122548, 0.88677476, 0.83255026, 0.77000169, 0.70057879,
+ 0.70057879, 0.62550815, 0.54607111, 0.46379946, 0.38022502
+ ])
+ # fmt: on
+
+ modal2 = rotor3.run_modal(Q_(900, "RPM"))
+ fig2 = modal2.plot_mode_2d(1, orientation="x")
+ data_x = fig2.data[0].y
+
+ modal3 = rotor3.run_modal(Q_(900, "RPM"))
+ fig3 = modal3.plot_mode_2d(1, orientation="y")
+ data_y = fig3.data[0].y
+
+ # fmt: off
+ expected_data_y = np.array([
+ 1.63888742e-13, 1.97035201e-13, 2.29738935e-13, 2.61467959e-13,
+ 2.91690288e-13, 2.91690288e-13, 3.19972642e-13, 3.45901475e-13,
+ 3.68974412e-13, 3.88689077e-13, 3.88689077e-13, 4.04657656e-13,
+ 4.16754177e-13, 4.24869024e-13, 4.28892585e-13, 4.28892585e-13,
+ 4.28743563e-13, 4.24376114e-13, 4.15733802e-13, 4.02760190e-13,
+ 4.02760190e-13, 3.85469076e-13, 3.64306492e-13, 3.39864356e-13,
+ 3.12734588e-13, 3.12734588e-13, 2.83402610e-13, 2.52356655e-13,
+ 2.20192854e-13, 1.87507335e-13
+ ])
+ # fmt: on
+
+ assert_almost_equal(data_major, expected_data_major)
+ assert_almost_equal(data_x, expected_data_major)
+ assert_almost_equal(data_y, expected_data_y)
+
+
@pytest.fixture
def rotor3_odd():
# Rotor without damping with odd number of shaft elements (7)
|
Orientation is not available to plot_2d
When running modal analysis and plotting the results we use:
```python
modal = rotor.run_modal(...)
modal.plot_mode_2d(...)
```
In `plot_mode_2d()` we do not have an option to pass `orientation='y'` to the method, although that is available in the `Shape.plot_mode_2d()` method.
We should add the `orientation` argument to the `plot_mode_2d` method and add this to the docstring.
|
0.0
|
3895ca66b22f7e859956a732bba72a3846d1d03d
|
[
"ross/tests/test_rotor_assembly.py::test_modal_fig_orientation"
] |
[
"ross/tests/test_rotor_assembly.py::test_index_eigenvalues_rotor1",
"ross/tests/test_rotor_assembly.py::test_mass_matrix_rotor1",
"ross/tests/test_rotor_assembly.py::test_raise_if_element_outside_shaft",
"ross/tests/test_rotor_assembly.py::test_rotor_equality",
"ross/tests/test_rotor_assembly.py::test_mass_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a0_0_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a0_1_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a1_0_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a1_1_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_evals_sorted_rotor2",
"ross/tests/test_rotor_assembly.py::test_rotor_attributes",
"ross/tests/test_rotor_assembly.py::test_kappa_rotor3",
"ross/tests/test_rotor_assembly.py::test_kappa_mode_rotor3",
"ross/tests/test_rotor_assembly.py::test_evals_rotor3_rotor4",
"ross/tests/test_rotor_assembly.py::test_campbell",
"ross/tests/test_rotor_assembly.py::test_freq_response_w_force",
"ross/tests/test_rotor_assembly.py::test_mesh_convergence",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor3",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor5",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor6",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor9",
"ross/tests/test_rotor_assembly.py::test_static_analysis_high_stiffness",
"ross/tests/test_rotor_assembly.py::test_static_bearing_with_disks",
"ross/tests/test_rotor_assembly.py::test_run_critical_speed",
"ross/tests/test_rotor_assembly.py::test_coaxial_rotor_assembly",
"ross/tests/test_rotor_assembly.py::test_from_section",
"ross/tests/test_rotor_assembly.py::test_whirl_values",
"ross/tests/test_rotor_assembly.py::test_kappa_mode",
"ross/tests/test_rotor_assembly.py::test_kappa_axes_values",
"ross/tests/test_rotor_assembly.py::test_plot_mode",
"ross/tests/test_rotor_assembly.py::test_unbalance",
"ross/tests/test_rotor_assembly.py::test_deflected_shape",
"ross/tests/test_rotor_assembly.py::test_global_index",
"ross/tests/test_rotor_assembly.py::test_distinct_dof_elements_error",
"ross/tests/test_rotor_assembly.py::test_modal_6dof",
"ross/tests/test_rotor_assembly.py::test_ucs_calc_rotor2",
"ross/tests/test_rotor_assembly.py::test_ucs_calc",
"ross/tests/test_rotor_assembly.py::test_ucs_rotor9",
"ross/tests/test_rotor_assembly.py::test_pickle",
"ross/tests/test_rotor_assembly.py::test_save_load",
"ross/tests/test_rotor_assembly.py::test_plot_rotor",
"ross/tests/test_rotor_assembly.py::test_plot_rotor_without_disk",
"ross/tests/test_rotor_assembly.py::test_axial_force",
"ross/tests/test_rotor_assembly.py::test_torque",
"ross/tests/test_rotor_assembly.py::test_rotor_conical_frequencies"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-04-20 20:59:41+00:00
|
mit
| 4,528 |
|
petrobras__ross-972
|
diff --git a/ross/results.py b/ross/results.py
index 97d2913..20fdfc1 100644
--- a/ross/results.py
+++ b/ross/results.py
@@ -389,7 +389,7 @@ class Shape(Results):
xmax, ixmax = max(abs(modex)), np.argmax(abs(modex))
ymax, iymax = max(abs(modey)), np.argmax(abs(modey))
- if ymax > 0.4 * xmax:
+ if ymax > xmax:
evec /= modey[iymax]
else:
evec /= modex[ixmax]
|
petrobras/ross
|
8b7889b77d248f959e8f8c3d45f188e5853c0e02
|
diff --git a/ross/tests/test_rotor_assembly.py b/ross/tests/test_rotor_assembly.py
index e0db857..de90b1b 100644
--- a/ross/tests/test_rotor_assembly.py
+++ b/ross/tests/test_rotor_assembly.py
@@ -488,13 +488,13 @@ def test_kappa_rotor3(rotor3):
assert_allclose(modal3_2000.kappa(0, 0)["kappa"], -0.6148843693807821, rtol=1e-3)
assert_allclose(modal3_2000.kappa(0, 1)["Frequency"], 88.98733511566752, rtol=1e-3)
- assert_allclose(modal3_2000.kappa(0, 1)["Major axis"], 0.480048, rtol=1e-3)
- assert_allclose(modal3_2000.kappa(0, 1)["Minor axis"], 0.40597, rtol=1e-3)
+ assert_allclose(modal3_2000.kappa(0, 1)["Major axis"], 0.353984, rtol=1e-3)
+ assert_allclose(modal3_2000.kappa(0, 1)["Minor axis"], 0.299359, rtol=1e-3)
assert_allclose(modal3_2000.kappa(0, 1)["kappa"], 0.8456866641084784, rtol=1e-3)
assert_allclose(modal3_2000.kappa(1, 1)["Frequency"], 88.98733511566752, rtol=1e-3)
- assert_allclose(modal3_2000.kappa(1, 1)["Major axis"], 0.911015, rtol=1e-3)
- assert_allclose(modal3_2000.kappa(1, 1)["Minor axis"], 0.692178, rtol=1e-3)
+ assert_allclose(modal3_2000.kappa(1, 1)["Major axis"], 0.671776, rtol=1e-3)
+ assert_allclose(modal3_2000.kappa(1, 1)["Minor axis"], 0.510407, rtol=1e-3)
assert_allclose(modal3_2000.kappa(1, 1)["kappa"], 0.7597878964314968, rtol=1e-3)
@@ -1503,32 +1503,24 @@ def test_plot_mode(rotor7):
modal7 = rotor7.run_modal(50, sparse=False)
fig = modal7.plot_orbit(1, 3)
- expected_x = np.array([-1.750102e-02, -3.499667e-02, -5.248161e-02, -6.995046e-02])
- expected_y = np.array([1.0, 0.999847, 0.999387, 0.998622, 0.99755])
+ expected_radius = 1
+
assert fig.data[0]["line"]["color"] == "#1f77b4" # blue
- assert_allclose(fig.data[0]["x"][1:5], expected_x, rtol=1e-5)
- assert_allclose(fig.data[0]["y"][:5], expected_y, rtol=1e-5)
+ assert_allclose(
+ np.sqrt(fig.data[0].x ** 2 + fig.data[0].y ** 2)[0], expected_radius
+ )
fig = modal7.plot_mode_2d(1)
- expected_x = np.array([0.0, 0.0625, 0.125, 0.1875, 0.25])
- expected_y = np.array([0.333274, 0.416889, 0.499442, 0.579556, 0.655866])
-
- assert fig.data[0]["line"]["color"] == "#1f77b4" # blue
- assert_allclose(fig.data[0]["x"][:5], expected_x, rtol=1e-5)
- assert_allclose(fig.data[0]["y"][:5], expected_y, rtol=1e-5)
+ mode_shape = fig.data[0].y
+ mode_x = fig.data[0].x
- fig = modal7.plot_mode_3d(1)
+ poly_coefs = np.polyfit(mode_x, mode_shape, 3)
- expected_x = np.array([0.0, 0.0625, 0.125, 0.1875, 0.25])
- expected_y = np.array([0.01876129, 0.01631675, 0.01390729, 0.0115747, 0.00936075])
- expected_z = np.array([0.33274591, 0.41656925, 0.49924871, 0.5794401, 0.65579925])
+ expected_coefs = np.array([-0.05672087, -1.04116649, 1.719815])
assert fig.data[0]["line"]["color"] == "#1f77b4" # blue
- # -3 is the black line that passes through each orbit starting point
- assert_allclose(fig.data[-3]["x"][:5], expected_x, rtol=1e-5)
- assert_allclose(fig.data[-3]["y"][:5], expected_y, rtol=1e-5)
- assert_allclose(fig.data[-3]["z"][:5], expected_z, rtol=1e-5)
+ assert_allclose(poly_coefs[:-1], expected_coefs, rtol=1e-5)
def test_unbalance(rotor3):
|
Change how we normalize the vectors in Shape objects
`Shape` objects have the option to normalize the vector which is passed in instantiation. This is used when creating shape objects for the mode shapes.
It would make sense that the max major axis in each normalized mode shape would be equal to one, but that is not the case for our current code.
|
0.0
|
8b7889b77d248f959e8f8c3d45f188e5853c0e02
|
[
"ross/tests/test_rotor_assembly.py::test_kappa_rotor3"
] |
[
"ross/tests/test_rotor_assembly.py::test_index_eigenvalues_rotor1",
"ross/tests/test_rotor_assembly.py::test_mass_matrix_rotor1",
"ross/tests/test_rotor_assembly.py::test_raise_if_element_outside_shaft",
"ross/tests/test_rotor_assembly.py::test_rotor_equality",
"ross/tests/test_rotor_assembly.py::test_mass_matrix_rotor2_with_bearing_mass",
"ross/tests/test_rotor_assembly.py::test_mass_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a0_0_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a0_1_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a1_0_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_a1_1_matrix_rotor2",
"ross/tests/test_rotor_assembly.py::test_evals_sorted_rotor2",
"ross/tests/test_rotor_assembly.py::test_modal_fig_orientation",
"ross/tests/test_rotor_assembly.py::test_rotor_attributes",
"ross/tests/test_rotor_assembly.py::test_kappa_mode_rotor3",
"ross/tests/test_rotor_assembly.py::test_evals_rotor3_rotor4",
"ross/tests/test_rotor_assembly.py::test_campbell",
"ross/tests/test_rotor_assembly.py::test_freq_response_w_force",
"ross/tests/test_rotor_assembly.py::test_mesh_convergence",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor3",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor5",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor6",
"ross/tests/test_rotor_assembly.py::test_static_analysis_rotor9",
"ross/tests/test_rotor_assembly.py::test_static_analysis_high_stiffness",
"ross/tests/test_rotor_assembly.py::test_static_bearing_with_disks",
"ross/tests/test_rotor_assembly.py::test_run_critical_speed",
"ross/tests/test_rotor_assembly.py::test_coaxial_rotor_assembly",
"ross/tests/test_rotor_assembly.py::test_from_section",
"ross/tests/test_rotor_assembly.py::test_whirl_values",
"ross/tests/test_rotor_assembly.py::test_kappa_mode",
"ross/tests/test_rotor_assembly.py::test_kappa_axes_values",
"ross/tests/test_rotor_assembly.py::test_plot_mode",
"ross/tests/test_rotor_assembly.py::test_unbalance",
"ross/tests/test_rotor_assembly.py::test_deflected_shape",
"ross/tests/test_rotor_assembly.py::test_global_index",
"ross/tests/test_rotor_assembly.py::test_distinct_dof_elements_error",
"ross/tests/test_rotor_assembly.py::test_modal_6dof",
"ross/tests/test_rotor_assembly.py::test_ucs_calc_rotor2",
"ross/tests/test_rotor_assembly.py::test_ucs_calc",
"ross/tests/test_rotor_assembly.py::test_ucs_rotor9",
"ross/tests/test_rotor_assembly.py::test_pickle",
"ross/tests/test_rotor_assembly.py::test_save_load",
"ross/tests/test_rotor_assembly.py::test_plot_rotor",
"ross/tests/test_rotor_assembly.py::test_plot_rotor_without_disk",
"ross/tests/test_rotor_assembly.py::test_axial_force",
"ross/tests/test_rotor_assembly.py::test_torque",
"ross/tests/test_rotor_assembly.py::test_rotor_conical_frequencies"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-05-02 16:59:01+00:00
|
mit
| 4,529 |
|
pgmpy__pgmpy-1059
|
diff --git a/pgmpy/readwrite/BIF.py b/pgmpy/readwrite/BIF.py
index affdcf8d..af0bbbf2 100644
--- a/pgmpy/readwrite/BIF.py
+++ b/pgmpy/readwrite/BIF.py
@@ -95,7 +95,7 @@ class BIFReader(object):
# 1.00 or 1 or 1.00. 0.00 or 9.8e-5 etc
num_expr = Word(nums + '-' + '+' + 'e' + 'E' + '.') + Suppress(Optional(","))
probability_expr = Suppress('probability') + Suppress('(') + OneOrMore(word_expr) + Suppress(')')
- optional_expr = Suppress('(') + Suppress(OneOrMore(word_expr2)) + Suppress(')')
+ optional_expr = Suppress('(') + OneOrMore(word_expr2) + Suppress(')')
probab_attributes = optional_expr | Suppress('table')
cpd_expr = probab_attributes + OneOrMore(num_expr)
@@ -237,21 +237,27 @@ class BIFReader(object):
"""
variable_cpds = {}
for block in self.probability_block():
- name = self.probability_expr.searchString(block)[0][0]
+ names = self.probability_expr.searchString(block)
+ var_name, parents = names[0][0], names[0][1:]
cpds = self.cpd_expr.searchString(block)
- arr = [float(j) for i in cpds for j in i]
if 'table' in block:
+ arr = [float(j) for i in cpds for j in i]
arr = numpy.array(arr)
- arr = arr.reshape((len(self.variable_states[name]),
- arr.size // len(self.variable_states[name])))
+ arr = arr.reshape((len(self.variable_states[var_name]),
+ arr.size // len(self.variable_states[var_name])))
else:
- length = len(self.variable_states[name])
- reshape_arr = [[] for i in range(length)]
- for i, val in enumerate(arr):
- reshape_arr[i % length].append(val)
- arr = reshape_arr
+ length = sum(len(self.variable_states[var]) for var in parents)
+ arr = [[0 for j in range(length)] for i in self.variable_states[var_name]]
+ length = len(self.variable_states[var_name])
+ for prob_line in cpds:
+ states = prob_line[:len(parents)]
+ vals = [float(i) for i in prob_line[len(parents):]]
+ offset = sum((len(parents)-i)*self.variable_states[parents[i]].index(states[i])
+ for i in range(len(states)))
+ for i, val in enumerate(vals):
+ arr[i][offset] = val
arr = numpy.array(arr)
- variable_cpds[name] = arr
+ variable_cpds[var_name] = arr
return variable_cpds
|
pgmpy/pgmpy
|
c5ba23a8f7891c0f32df56b24fa2dc5fc0ddbc85
|
diff --git a/pgmpy/tests/test_readwrite/test_BIF.py b/pgmpy/tests/test_readwrite/test_BIF.py
index 3142f057..e45a9e32 100644
--- a/pgmpy/tests/test_readwrite/test_BIF.py
+++ b/pgmpy/tests/test_readwrite/test_BIF.py
@@ -111,6 +111,51 @@ probability ( "family-out" ) { //1 variable(s) and 2 values
np_test.assert_array_equal(cpd_expected[variable],
cpd[variable])
+ def test_get_values_reordered(self):
+
+ cancer_values1 = BIFReader(string="""
+network unknown {
+}
+variable Pollution {
+ type discrete [ 2 ] { low, high };
+}
+variable Smoker {
+ type discrete [ 2 ] { True, False };
+}
+variable Cancer {
+ type discrete [ 2 ] { True, False };
+}
+probability ( Cancer | Pollution, Smoker ) {
+ (low, True) 0.03, 0.97;
+ (low, False) 0.001, 0.999;
+ (high, True) 0.05, 0.95;
+ (high, False) 0.02, 0.98;
+}
+ """).get_values()
+
+ cancer_values2 = BIFReader(string="""
+network unknown {
+}
+variable Pollution {
+ type discrete [ 2 ] { low, high };
+}
+variable Smoker {
+ type discrete [ 2 ] { True, False };
+}
+variable Cancer {
+ type discrete [ 2 ] { True, False };
+}
+probability ( Cancer | Pollution, Smoker ) {
+ (low, True) 0.03, 0.97;
+ (high, True) 0.05, 0.95;
+ (low, False) 0.001, 0.999;
+ (high, False) 0.02, 0.98;
+}
+ """).get_values()
+
+ for var in cancer_values1:
+ np_test.assert_array_equal(cancer_values1[var], cancer_values2[var])
+
def test_get_parents(self):
parents_expected = {'bowel-problem': [],
|
Bug in BIFReader function
### Subject of the issue
There is a bug in the BIFReader function. I think while reading the CPD only the order in which the states are specified is taken into account , not the actual values of the state.
e.g. variable Pollution {low high} **high - 1 low - 0**
variable Smoker {True False} **True - 0, False-1**
probability ( Cancer | Pollution, Smoker ) {
(low, True) 0.03, 0.97;
(high, True) 0.05, 0.95; **should be 1,0 instead of 0,1**
(low, False) 0.001, 0.999;**should be 0,1 instead of 1,0**
(high, False) 0.02, 0.98;
}
probability ( Cancer | Pollution, Smoker ) {
(low, True) 0.03, 0.97;
(low, False) 0.001, 0.999;
(high, True) 0.05, 0.95;
(high, False) 0.02, 0.98;
}
The two CPDs are equivalent but read differently by the function.
### Your environment
* pgmpy version 0.1.7
* Python version 3.5
* Operating System Unbuntu16.04
### Steps to reproduce
Attached are two benchmark files cancer1.bif and cancer2.bif with the same data but the order of specifying the states of CPD is changed for one of the tables. Inference on both models gives different results even though the model is the same.
Sample run file is also attached.
### Expected behaviour
The result of the query should be same in both cases since models are equivalent.
### Actual behaviour
Results with model file1:
```
+----------+---------------+
| Cancer | phi(Cancer) |
+==========+===============+
| Cancer_0 | 0.0116 |
+----------+---------------+
| Cancer_1 | 0.9884 |
+----------+---------------+
Results with model file2:
+----------+---------------+
| Cancer | phi(Cancer) |
+==========+===============+
| Cancer_0 | 0.0410 |
+----------+---------------+
| Cancer_1 | 0.9590 |
+----------+---------------+
```
[test.zip](https://github.com/pgmpy/pgmpy/files/2775737/test.zip)
|
0.0
|
c5ba23a8f7891c0f32df56b24fa2dc5fc0ddbc85
|
[
"pgmpy/tests/test_readwrite/test_BIF.py::TestBIFReader::test_get_values_reordered"
] |
[
"pgmpy/tests/test_readwrite/test_BIF.py::TestBIFReader::test_get_edges",
"pgmpy/tests/test_readwrite/test_BIF.py::TestBIFReader::test_get_parents",
"pgmpy/tests/test_readwrite/test_BIF.py::TestBIFReader::test_get_property",
"pgmpy/tests/test_readwrite/test_BIF.py::TestBIFReader::test_get_values",
"pgmpy/tests/test_readwrite/test_BIF.py::TestBIFReader::test_get_variables",
"pgmpy/tests/test_readwrite/test_BIF.py::TestBIFReader::test_network_name",
"pgmpy/tests/test_readwrite/test_BIF.py::TestBIFReader::test_states"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-02-04 19:56:25+00:00
|
mit
| 4,530 |
|
phac-nml__irida-uploader-142
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cef3b7d..8ee2cf4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,14 @@
Changes
=======
+Beta 0.8.3
+----------
+Changes:
+* [GUI] Partial uploads now display the reason (error) that caused the run to stop.
+
+Bug Fixes:
+* Fixed Error's setting run to ERROR when run has uploaded samples. Now sets these runs to PARTIAL to allow continuing from where the Error occurred.
+
Beta 0.8.2
----------
Bug Fixes:
diff --git a/iridauploader/__init__.py b/iridauploader/__init__.py
index b0c9879..e1fc42e 100644
--- a/iridauploader/__init__.py
+++ b/iridauploader/__init__.py
@@ -1,1 +1,1 @@
-VERSION_NUMBER = "0.8.2"
+VERSION_NUMBER = "0.8.3"
diff --git a/iridauploader/core/api_handler.py b/iridauploader/core/api_handler.py
index 4abeae8..90e9653 100644
--- a/iridauploader/core/api_handler.py
+++ b/iridauploader/core/api_handler.py
@@ -162,6 +162,7 @@ def upload_sequencing_run(sequencing_run, directory_status, upload_mode, run_id=
logging.info("Using existing run id '{}' for this upload.".format(run_id))
# Update directory status file
directory_status.run_id = run_id
+ directory_status.status = model.DirectoryStatus.PARTIAL
progress.write_directory_status(directory_status)
try:
diff --git a/iridauploader/core/upload_helpers.py b/iridauploader/core/upload_helpers.py
index 6595b79..47e8927 100644
--- a/iridauploader/core/upload_helpers.py
+++ b/iridauploader/core/upload_helpers.py
@@ -28,6 +28,11 @@ def _set_and_write_directory_status(directory_status, status, message=None):
:param message: string
:return:
"""
+ # If a run is being set to error, but it already has a sequencing id (started upload),
+ # it is a partial run that could be resumed.
+ if status == DirectoryStatus.ERROR and directory_status.run_id is not None:
+ status = DirectoryStatus.PARTIAL
+
try:
directory_status.status = status
directory_status.message = message
@@ -76,8 +81,6 @@ def parse_and_validate(directory_status, parse_as_partial):
:param parse_as_partial: sequencing_run will not include any samples that have already been uploaded
:return:
"""
- # Set directory status to partial before starting
- _set_and_write_directory_status(directory_status, DirectoryStatus.PARTIAL)
try:
sequencing_run = parsing_handler.parse_and_validate(directory_status.directory)
diff --git a/iridauploader/gui/main_dialog.py b/iridauploader/gui/main_dialog.py
index 6301c05..9bcffa5 100644
--- a/iridauploader/gui/main_dialog.py
+++ b/iridauploader/gui/main_dialog.py
@@ -413,8 +413,11 @@ class MainDialog(QtWidgets.QDialog):
# We need to block upload until the user clicks continue
self._upload_button.set_block()
# give user info
- self._show_and_fill_info_partial_upload_options(
- "This run directory is partially uploaded. Choose how you would like to Continue.")
+ user_message = "This run directory is partially uploaded. Choose how you would like to Continue."
+ if status.message is not None:
+ user_message = user_message + " This directory had the error(s) below. "
+ self._show_previous_error(status.message)
+ self._show_and_fill_info_partial_upload_options(user_message)
elif status.status_equals(DirectoryStatus.ERROR):
# We need to block upload until the user clicks continue
diff --git a/setup.py b/setup.py
index d485422..64fa87e 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name='iridauploader',
- version='0.8.2',
+ version='0.8.3',
description='IRIDA uploader: upload NGS data to IRIDA system',
url='https://github.com/phac-nml/irida-uploader',
author='Jeffrey Thiessen',
diff --git a/windows-installer.cfg b/windows-installer.cfg
index a8958e2..49930ba 100644
--- a/windows-installer.cfg
+++ b/windows-installer.cfg
@@ -1,6 +1,6 @@
[Application]
name=IRIDA Uploader GUI
-version=0.8.2
+version=0.8.3
entry_point=iridauploader.gui.gui:main
icon=iridauploader/gui/images/icon.ico
# Uncomment this to have a console show alongside the application
|
phac-nml/irida-uploader
|
e1f0974e68be93a2a69b11bb1cb569d95191e499
|
diff --git a/iridauploader/tests/core/test_api_handler.py b/iridauploader/tests/core/test_api_handler.py
index f41cc16..8dfc3bc 100644
--- a/iridauploader/tests/core/test_api_handler.py
+++ b/iridauploader/tests/core/test_api_handler.py
@@ -8,6 +8,7 @@ from iridauploader.parsers.miseq.parser import Parser
from iridauploader.api import MODE_DEFAULT, MODE_FAST5, MODE_ASSEMBLIES
from iridauploader.api.exceptions import IridaResourceError
from iridauploader.model.exceptions import ModelValidationError
+from iridauploader.model import DirectoryStatus
path_to_module = path.abspath(path.dirname(__file__))
if len(path_to_module) == 0:
@@ -151,6 +152,7 @@ class TestUploadSequencingRun(unittest.TestCase):
class StubDirectoryStatus:
run_id = None
+ status = None
def set_sample_uploaded(self, sample_name, project_id, uploaded):
return None
@@ -189,12 +191,13 @@ class TestUploadSequencingRun(unittest.TestCase):
stub_api_instance.set_seq_run_uploading.side_effect = [True]
stub_api_instance.send_sequence_files.side_effect = [True, True, True]
stub_api_instance.set_seq_run_complete.side_effect = [True]
+ stub_directory_status = self.StubDirectoryStatus()
mock_api_instance.side_effect = [stub_api_instance]
mock_progress.side_effect = [None, None, None, None, None, None, None, None, None, None]
api_handler.upload_sequencing_run(sequencing_run,
- directory_status=self.StubDirectoryStatus(),
+ directory_status=stub_directory_status,
upload_mode=MODE_DEFAULT)
# ensure the response matches our mocks, and that all the needed functions were called correctly
@@ -210,6 +213,9 @@ class TestUploadSequencingRun(unittest.TestCase):
upload_id=55, upload_mode=MODE_DEFAULT)
])
stub_api_instance.set_seq_run_complete.assert_called_once_with(mock_sequence_run_id)
+ # Verify the DirectoryStatus object got assigned a run_id and status for upload
+ self.assertEqual(stub_directory_status.status, DirectoryStatus.PARTIAL)
+ self.assertEqual(stub_directory_status.run_id, mock_sequence_run_id)
@patch("iridauploader.core.api_handler._get_api_instance")
@patch("iridauploader.progress.write_directory_status")
diff --git a/iridauploader/tests/core/test_upload_helpers.py b/iridauploader/tests/core/test_upload_helpers.py
index 69ae1a1..1b71d62 100644
--- a/iridauploader/tests/core/test_upload_helpers.py
+++ b/iridauploader/tests/core/test_upload_helpers.py
@@ -22,6 +22,7 @@ class TestSetAndWriteDirectoryStatus(unittest.TestCase):
class StubDirectoryStatus:
status = None
message = ""
+ run_id = None
def setUp(self):
print("\nStarting " + self.__module__ + ": " + self._testMethodName)
@@ -43,6 +44,46 @@ class TestSetAndWriteDirectoryStatus(unittest.TestCase):
# verify write
mock_progress.write_directory_status.assert_called_with(stub_dir_status)
+ @patch("iridauploader.core.upload_helpers.progress")
+ def test_valid_write_error(self, mock_progress):
+ """
+ Tessts a valid write attempt of ERROR, where the upload has not begun
+ :param mock_progress:
+ :return:
+ """
+ mock_initial_status = DirectoryStatus.ERROR
+ mock_expected_status = DirectoryStatus.ERROR
+ mock_message = "message"
+ stub_dir_status = self.StubDirectoryStatus()
+ stub_dir_status.run_id = None # Upload has not started
+ # mock main call to test
+ mock_progress.write_directory_status.side_effect = [None]
+ # run function
+ upload_helpers._set_and_write_directory_status(stub_dir_status, mock_initial_status, mock_message)
+ # verify write
+ stub_dir_status.status = mock_expected_status
+ mock_progress.write_directory_status.assert_called_with(stub_dir_status)
+
+ @patch("iridauploader.core.upload_helpers.progress")
+ def test_valid_write_partial(self, mock_progress):
+ """
+ Tessts a valid write attempt of PARTIAL, ERROR is given but the run has started
+ :param mock_progress:
+ :return:
+ """
+ mock_initial_status = DirectoryStatus.ERROR
+ mock_expected_status = DirectoryStatus.ERROR
+ mock_message = "message"
+ stub_dir_status = self.StubDirectoryStatus()
+ stub_dir_status.run_id = 123 # Upload has not started
+ # mock main call to test
+ mock_progress.write_directory_status.side_effect = [None]
+ # run function
+ upload_helpers._set_and_write_directory_status(stub_dir_status, mock_initial_status, mock_message)
+ # verify write
+ stub_dir_status.status = mock_expected_status
+ mock_progress.write_directory_status.assert_called_with(stub_dir_status)
+
@patch("iridauploader.core.upload_helpers.progress.write_directory_status")
def test_invalid_write(self, mock_progress):
"""
@@ -114,7 +155,6 @@ class TestParseAndValidate(unittest.TestCase):
def test_valid_parse(self, mock_parsing_handler, mock_set_and_write):
"""
verifies parse and validate was called,
- and _set_and_write_directory_status is called once with PARTIAL
:param mock_parsing_handler:
:param mock_set_and_write:
:return:
@@ -127,15 +167,13 @@ class TestParseAndValidate(unittest.TestCase):
directory_status=stub_directory_status, parse_as_partial=False))
mock_parsing_handler.parse_and_validate.assert_called_with(stub_directory_status.directory)
- mock_set_and_write.assert_called_with(stub_directory_status, DirectoryStatus.PARTIAL)
@patch("iridauploader.core.upload_helpers.set_uploaded_samples_to_skip")
@patch("iridauploader.core.upload_helpers._set_and_write_directory_status")
@patch("iridauploader.core.upload_helpers.parsing_handler")
def test_valid_partial_parse(self, mock_parsing_handler, mock_set_and_write, mock_set_uploaded_samples_to_skip):
"""
- verifies parse and validate was called,
- and _set_and_write_directory_status is called once with PARTIAL
+ verifies parse and validate was called with partial run handling
:param mock_parsing_handler:
:param mock_set_and_write:
:return:
@@ -149,7 +187,6 @@ class TestParseAndValidate(unittest.TestCase):
directory_status=stub_directory_status, parse_as_partial=True))
mock_parsing_handler.parse_and_validate.assert_called_with(stub_directory_status.directory)
- mock_set_and_write.assert_called_with(stub_directory_status, DirectoryStatus.PARTIAL)
mock_set_uploaded_samples_to_skip.assert_called_with("return_value",
stub_directory_status.get_sample_status_list())
@@ -422,6 +459,11 @@ class TestUploadSequencingRun(unittest.TestCase):
Tests core.upload_helpers.upload_sequencing_run
"""
+ class StubDirectoryStatus:
+ status = None
+ message = ""
+ run_id = None
+
def setUp(self):
print("\nStarting " + self.__module__ + ": " + self._testMethodName)
@@ -480,7 +522,7 @@ class TestUploadSequencingRun(unittest.TestCase):
def test_invalid_connection(self, mock_api_handler, mock_set_and_write):
"""
tests catching and raising IridaConnectionError
- and _set_and_write_directory_status is DirectoryStatus.COMPLETE
+ and _set_and_write_directory_status is DirectoryStatus.ERROR
:param mock_api_handler:
:param mock_set_and_write:
:return:
@@ -488,16 +530,23 @@ class TestUploadSequencingRun(unittest.TestCase):
mock_api_handler.upload_sequencing_run.side_effect = [IridaConnectionError]
mock_set_and_write.side_effect = [True]
+ # Init directory status as a new run
+ stub_directory_status = self.StubDirectoryStatus()
+ stub_directory_status.status = DirectoryStatus.NEW
+ stub_directory_status.run_id = None
+
with self.assertRaises(IridaConnectionError):
- upload_helpers.upload_sequencing_run(directory_status='status',
+ upload_helpers.upload_sequencing_run(directory_status=stub_directory_status,
sequencing_run='run',
upload_mode='mode')
- mock_api_handler.upload_sequencing_run.assert_called_with(directory_status='status',
+ mock_api_handler.upload_sequencing_run.assert_called_with(directory_status=stub_directory_status,
sequencing_run='run',
upload_mode='mode',
run_id=None)
- mock_set_and_write.assert_called_with("status", DirectoryStatus.ERROR, 'Lost connection to Irida. Errors: ()')
+ mock_set_and_write.assert_called_with(stub_directory_status,
+ DirectoryStatus.ERROR,
+ 'Lost connection to Irida. Errors: ()')
@patch("iridauploader.core.upload_helpers._set_and_write_directory_status")
@patch("iridauploader.core.upload_helpers.api_handler")
|
GUI: partially uploaded runs resulting in error are not being detected as partial
This means the user cannot continue an upload if their upload stops halfway through do to a server error
|
0.0
|
e1f0974e68be93a2a69b11bb1cb569d95191e499
|
[
"iridauploader/tests/core/test_api_handler.py::TestUploadSequencingRun::test_valid_all_functions_called"
] |
[
"iridauploader/tests/core/test_api_handler.py::TestPrepareAndValidateForUpload::test_invalid_could_not_send_sample",
"iridauploader/tests/core/test_api_handler.py::TestPrepareAndValidateForUpload::test_invalid_validation_project_does_not_exist",
"iridauploader/tests/core/test_api_handler.py::TestPrepareAndValidateForUpload::test_valid_all_functions_called",
"iridauploader/tests/core/test_api_handler.py::TestPrepareAndValidateForUpload::test_valid_send_sample",
"iridauploader/tests/core/test_api_handler.py::TestUploadSequencingRun::test_invalid_error_raised",
"iridauploader/tests/core/test_api_handler.py::TestUploadSequencingRun::test_valid_all_functions_called_assemblies",
"iridauploader/tests/core/test_api_handler.py::TestUploadSequencingRun::test_valid_all_functions_called_fast5",
"iridauploader/tests/core/test_api_handler.py::TestSendProject::test_all_functions_called",
"iridauploader/tests/core/test_api_handler.py::TestSendProject::test_can_not_send_project",
"iridauploader/tests/core/test_api_handler.py::TestSendProject::test_project_invalid",
"iridauploader/tests/core/test_upload_helpers.py::TestSetAndWriteDirectoryStatus::test_invalid_write",
"iridauploader/tests/core/test_upload_helpers.py::TestSetAndWriteDirectoryStatus::test_valid_write",
"iridauploader/tests/core/test_upload_helpers.py::TestSetAndWriteDirectoryStatus::test_valid_write_error",
"iridauploader/tests/core/test_upload_helpers.py::TestSetAndWriteDirectoryStatus::test_valid_write_partial",
"iridauploader/tests/core/test_upload_helpers.py::TestDirectoryHasReadonlyConflict::test_case_matrix",
"iridauploader/tests/core/test_upload_helpers.py::TestParseAndValidate::test_invalid_directory",
"iridauploader/tests/core/test_upload_helpers.py::TestParseAndValidate::test_invalid_validation",
"iridauploader/tests/core/test_upload_helpers.py::TestParseAndValidate::test_valid_parse",
"iridauploader/tests/core/test_upload_helpers.py::TestParseAndValidate::test_valid_partial_parse",
"iridauploader/tests/core/test_upload_helpers.py::TestSetUploadedSamplesToSkip::test_remove_one",
"iridauploader/tests/core/test_upload_helpers.py::TestVerifyUploadMode::test_invalid_upload_mode",
"iridauploader/tests/core/test_upload_helpers.py::TestVerifyUploadMode::test_valid_upload_modes",
"iridauploader/tests/core/test_upload_helpers.py::TestInitFileStatusListFromSequencingRun::test_all_functions_called",
"iridauploader/tests/core/test_upload_helpers.py::TestInitializeApi::test_invalid_init",
"iridauploader/tests/core/test_upload_helpers.py::TestInitializeApi::test_valid_init",
"iridauploader/tests/core/test_upload_helpers.py::TestIridaPrepAndValidation::test_invalid_connection",
"iridauploader/tests/core/test_upload_helpers.py::TestIridaPrepAndValidation::test_invalid_validation",
"iridauploader/tests/core/test_upload_helpers.py::TestIridaPrepAndValidation::test_valid_prep_and_validation",
"iridauploader/tests/core/test_upload_helpers.py::TestUploadSequencingRun::test_invalid_connection",
"iridauploader/tests/core/test_upload_helpers.py::TestUploadSequencingRun::test_invalid_file",
"iridauploader/tests/core/test_upload_helpers.py::TestUploadSequencingRun::test_invalid_resource",
"iridauploader/tests/core/test_upload_helpers.py::TestUploadSequencingRun::test_valid_partial_upload",
"iridauploader/tests/core/test_upload_helpers.py::TestUploadSequencingRun::test_valid_upload"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-12 19:24:45+00:00
|
apache-2.0
| 4,531 |
|
phac-nml__staramr-150
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c9bd308..6e6bce0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@
* Fixed up some Python warnings related to pandas (0.8.0.dev0).
* Adjusted `mlst` tests to account for differences in results for newer versions (0.8.0.dev0).
* Drop support for Python 3.5 as it leads to issues with managing dependency versions (0.8.0.dev0).
+* Switched from disallowing to generating a warning when the PointFinder organism is not one of the validated organisms (0.8.0.dev1).
# Version 0.7.2.zenodo0
diff --git a/staramr/__init__.py b/staramr/__init__.py
index 7556fbd..45fe705 100644
--- a/staramr/__init__.py
+++ b/staramr/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '0.8.0.dev0'
+__version__ = '0.8.0.dev1'
diff --git a/staramr/blast/pointfinder/PointfinderBlastDatabase.py b/staramr/blast/pointfinder/PointfinderBlastDatabase.py
index ef4b26b..58219a6 100644
--- a/staramr/blast/pointfinder/PointfinderBlastDatabase.py
+++ b/staramr/blast/pointfinder/PointfinderBlastDatabase.py
@@ -37,6 +37,13 @@ class PointfinderBlastDatabase(AbstractBlastDatabase):
def get_path(self, database_name):
return path.join(self.pointfinder_database_dir, database_name + self.fasta_suffix)
+ def is_validated(self):
+ """
+ Whether or not this particular PointFinder organism is part of the validated set.
+ :return: True if this PointFinder organism/database is validated, False otherwise.
+ """
+ return self.organism in self.get_available_organisms()
+
def get_resistance_codons(self, gene, codon_mutations):
"""
Gets a list of resistance codons from the given gene and codon mutations.
diff --git a/staramr/databases/BlastDatabaseRepositories.py b/staramr/databases/BlastDatabaseRepositories.py
index 95dd591..81c8cf4 100644
--- a/staramr/databases/BlastDatabaseRepositories.py
+++ b/staramr/databases/BlastDatabaseRepositories.py
@@ -1,7 +1,7 @@
import logging
import shutil
from collections import OrderedDict
-from typing import Dict
+from typing import Dict, List
from staramr.blast.AbstractBlastDatabase import AbstractBlastDatabase
from staramr.blast.plasmidfinder.PlasmidfinderBlastDatabase import PlasmidfinderBlastDatabase
@@ -87,6 +87,9 @@ class BlastDatabaseRepositories:
for name, repo in self._database_repositories.items():
info.update(repo.info())
+ if name == 'pointfinder':
+ info['pointfinder_organisms_all'] = ', '.join(sorted(self.get_pointfinder_organisms()))
+ info['pointfinder_organisms_valid'] = ', '.join(sorted(self.get_valid_pointfinder_organisms()))
return info
@@ -142,6 +145,25 @@ class BlastDatabaseRepositories:
return repos
+ def get_pointfinder_organisms(self) -> List[str]:
+ """
+ Gets a list of all pointfinder organisms from this database.
+ :return: A list of PointFinder organisms from this database.
+ """
+ try:
+ return PointfinderBlastDatabase.get_organisms(self.get_repo_dir('pointfinder'))
+ except FileNotFoundError as e:
+ logger.debug(e)
+ return []
+
+
+ def get_valid_pointfinder_organisms(self) -> List[str]:
+ """
+ Gets a list of all valid pointfinder organisms.
+ :return: A list of all valid pointfinder organisms.
+ """
+ return PointfinderBlastDatabase.get_available_organisms()
+
def build_blast_database(self, database_name: str, options: Dict[str, str] = {}) -> AbstractBlastDatabase:
"""
Builds a staramr.blast.AbstractBlastDatabase from the given parameters.
diff --git a/staramr/subcommand/Database.py b/staramr/subcommand/Database.py
index 18da93f..131bbbd 100644
--- a/staramr/subcommand/Database.py
+++ b/staramr/subcommand/Database.py
@@ -286,6 +286,13 @@ class Info(Database):
arg_drug_table = ARGDrugTable()
+ def write_database_info(database_repos):
+ database_info = database_repos.info()
+ database_info['mlst_version'] = JobHandler.get_mlst_version(JobHandler)
+
+ database_info.update(arg_drug_table.get_resistance_table_info())
+ sys.stdout.write(get_string_with_spacing(database_info))
+
if len(args.directories) == 0:
database_repos = AMRDatabasesManager.create_default_manager().get_database_repos()
if not AMRDatabasesManager.is_database_repos_default_commits(database_repos):
@@ -294,12 +301,7 @@ class Info(Database):
"AMR genes depending on how the database files are structured.")
try:
- database_info = database_repos.info()
- database_info['mlst_version'] = JobHandler.get_mlst_version(JobHandler)
-
- database_info.update(arg_drug_table.get_resistance_table_info())
- sys.stdout.write(get_string_with_spacing(database_info))
-
+ write_database_info(database_repos)
except DatabaseNotFoundException as e:
logger.error("No database found. Perhaps try restoring the default with 'staramr db restore-default'")
else:
@@ -312,9 +314,7 @@ class Info(Database):
"differences in the detected AMR genes depending on how the database files are structured.",
directory)
- database_info = database_repos.info()
- database_info.update(arg_drug_table.get_resistance_table_info())
- sys.stdout.write(get_string_with_spacing(database_info))
+ write_database_info(database_repos)
except DatabaseNotFoundException as e:
logger.error("Database not found in [%s]. Perhaps try building with 'staramr db build --dir %s'",
directory, directory)
diff --git a/staramr/subcommand/Search.py b/staramr/subcommand/Search.py
index 43e8f43..468fe13 100644
--- a/staramr/subcommand/Search.py
+++ b/staramr/subcommand/Search.py
@@ -55,11 +55,15 @@ class Search(SubCommand):
help='Search for AMR genes')
self._default_database_dir = AMRDatabasesManager.get_default_database_directory()
+ default_database_repos = AMRDatabasesManager.create_default_manager().get_database_repos()
+
cpu_count = multiprocessing.cpu_count()
arg_parser.add_argument('--pointfinder-organism', action='store', dest='pointfinder_organism', type=str,
- help='The organism to use for pointfinder {' + ', '.join(
- PointfinderBlastDatabase.get_available_organisms()) + '}. Defaults to disabling search for point mutations. [None].',
+ help=(f'The organism to use for pointfinder. '
+ f"Validated: {set(default_database_repos.get_valid_pointfinder_organisms())}. "
+ f"All: {set(default_database_repos.get_pointfinder_organisms())}. "
+ f"Defaults to disabling search for point mutations. [None]."),
default=None, required=False)
arg_parser.add_argument('--plasmidfinder-database-type', action='store', dest='plasmidfinder_database_type',
type=str,
@@ -293,10 +297,20 @@ class Search(SubCommand):
logger.info("Finished. Took %s minutes.", time_difference_minutes)
+ included_pointfinder = pointfinder_database is not None
+
settings = database_repos.info()
settings['mlst_version'] = JobHandler.get_mlst_version(JobHandler)
settings['command_line'] = ' '.join(sys.argv)
+ settings['pointfinder_organism'] = pointfinder_database.organism if included_pointfinder else 'None'
+
+ if included_pointfinder and not pointfinder_database.is_validated():
+ settings['messages'] = (f'Warning: Selected organism [{pointfinder_database.organism}] is '
+ f'not part of the validated set of organisms for PointFinder:'
+ f' {set(pointfinder_database.get_available_organisms())}. Cannot guarantee that all '
+ f'point mutations were detected properly.')
+
settings['version'] = self._version
settings['start_time'] = start_time.strftime(self.TIME_FORMAT)
settings['end_time'] = end_time.strftime(self.TIME_FORMAT)
@@ -352,9 +366,15 @@ class Search(SubCommand):
resfinder_database = database_repos.build_blast_database('resfinder')
if (args.pointfinder_organism):
- if args.pointfinder_organism not in PointfinderBlastDatabase.get_available_organisms():
- raise CommandParseException("The only Pointfinder organism(s) currently supported are " + str(
- PointfinderBlastDatabase.get_available_organisms()), self._root_arg_parser)
+ if args.pointfinder_organism not in database_repos.get_pointfinder_organisms():
+ raise CommandParseException(f"The organism \"{args.pointfinder_organism}\" is not found in the selected PointFinder database. The "
+ f"only organisms available are: {set(database_repos.get_pointfinder_organisms())}. "
+ f"Of these, only {set(PointfinderBlastDatabase.get_available_organisms())} have been validated.",
+ self._root_arg_parser)
+ elif args.pointfinder_organism not in PointfinderBlastDatabase.get_available_organisms():
+ logger.warning("The only validated Pointfinder organism(s) are " + str(
+ set(PointfinderBlastDatabase.get_available_organisms())) + f'. By selecting "{args.pointfinder_organism}" you are not guaranteed ' \
+ + 'that all point mutations in this PointFinder database will be properly detected.')
pointfinder_database = database_repos.build_blast_database('pointfinder',
{'organism': args.pointfinder_organism})
else:
|
phac-nml/staramr
|
1925f188764f42e054d5982a2fe299e1a868cf82
|
diff --git a/staramr/tests/integration/databases/test_BlastDatabaseRepositories.py b/staramr/tests/integration/databases/test_BlastDatabaseRepositories.py
index f9f3924..1e09e19 100644
--- a/staramr/tests/integration/databases/test_BlastDatabaseRepositories.py
+++ b/staramr/tests/integration/databases/test_BlastDatabaseRepositories.py
@@ -90,5 +90,9 @@ class BlastDatabaseRepositoriesIT(unittest.TestCase):
'Resfinder commits invalid')
self.assertEqual(database_info['pointfinder_db_commit'], self.POINTFINDER_VALID_COMMIT,
'Pointfinder commits invalid')
+ self.assertEqual(database_info['pointfinder_organisms_all'], 'campylobacter, e.coli, gonorrhoeae, salmonella, tuberculosis',
+ 'Pointfinder organisms are invalid')
+ self.assertEqual(database_info['pointfinder_organisms_valid'], 'campylobacter, salmonella',
+ 'Pointfinder organisms are invalid')
self.assertEqual(database_info['plasmidfinder_db_commit'], self.PLASMIDFINDER_VALID_COMMIT,
'Plasmidfinder commits invalid')
|
Change strict restriction on PointFinder organisms to be a warning
Currently, running with `--pointfinder-organism ORGANISM` with anything other than `salmonella` or `campylobacter` will result in an error due to a check I perform against a list of acceptable organisms here:
https://github.com/phac-nml/staramr/blob/107e2cd8a60cc9a3acce35ff6ede9ff4e75659c2/staramr/blast/pointfinder/PointfinderBlastDatabase.py#L78-L83
This was done to ensure that I only allow results to be generated from PointFinder databases/organisms that I have validated work with staramr. But, there are additional mutations from the PointFinder databases belonging to other organisms that could still be useful to run with staramar, even if not every mutation (or indel) will work properly.
Instead of restricting which organisms can be used by staramr for PointFinder I wish to switch this to a warning. That is, if `--pointfinder-organism ORGANISM` is set to anything outside of the list of validated organisms, results will still be generated by staramr, but a warning message will be printed.
|
0.0
|
1925f188764f42e054d5982a2fe299e1a868cf82
|
[
"staramr/tests/integration/databases/test_BlastDatabaseRepositories.py::BlastDatabaseRepositoriesIT::testInfo"
] |
[
"staramr/tests/integration/databases/test_BlastDatabaseRepositories.py::BlastDatabaseRepositoriesIT::testBuild",
"staramr/tests/integration/databases/test_BlastDatabaseRepositories.py::BlastDatabaseRepositoriesIT::testUpdate"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-20 18:35:47+00:00
|
apache-2.0
| 4,532 |
|
phanrahan__magma-1091
|
diff --git a/magma/__init__.py b/magma/__init__.py
index 22ea0b7d..275bbb5a 100644
--- a/magma/__init__.py
+++ b/magma/__init__.py
@@ -119,6 +119,7 @@ from magma.stubify import (
from magma.compile import MagmaCompileException
from magma.linking import link_module, link_default_module, clear_link_info
import magma.math
+from magma.value_utils import fill
################################################################################
# BEGIN ALIASES
diff --git a/magma/conversions.py b/magma/conversions.py
index 21128210..0f6d479b 100644
--- a/magma/conversions.py
+++ b/magma/conversions.py
@@ -60,7 +60,7 @@ def can_convert_to_bit_type(value):
return issubclass(magma_type(value), (Digital, Array, Tuple))
-def convertbit(value, totype):
+def convertbit(value, totype, name=None):
# NOTE: We don't do `isinstance` here because we want an upcast to cause a
# conversion
value = magma_value(value)
@@ -90,41 +90,44 @@ def convertbit(value, totype):
# invariant
return totype(1) if value else totype(0)
+ if name is None:
+ name = value.name
+
if value.is_input():
- b = In(totype)(name=value.name)
+ b = In(totype)(name=name)
elif value.is_output():
- b = Out(totype)(name=value.name)
+ b = Out(totype)(name=name)
else:
- b = totype()
+ b = totype(name=name)
b._wire = value._wire
return b
-def bit(value):
- return convertbit(value, Bit)
+def bit(value, name=None):
+ return convertbit(value, Bit, name=name)
-def clock(value):
- return convertbit(value, Clock)
+def clock(value, name=None):
+ return convertbit(value, Clock, name=name)
-def reset(value):
- return convertbit(value, Reset)
+def reset(value, name=None):
+ return convertbit(value, Reset, name=name)
-def asyncreset(value):
- return convertbit(value, AsyncReset)
+def asyncreset(value, name=None):
+ return convertbit(value, AsyncReset, name=name)
-def asyncresetn(value):
- return convertbit(value, AsyncResetN)
+def asyncresetn(value, name=None):
+ return convertbit(value, AsyncResetN, name=name)
-def enable(value):
- return convertbit(value, Enable)
+def enable(value, name=None):
+ return convertbit(value, Enable, name=name)
-def convertbits(value, n, totype, checkbit):
+def convertbits(value, n, totype, checkbit, name=None):
# NOTE: We don't do `isinstance` here because we want an upcast to cause a
# conversion
if type(value) is totype:
@@ -200,30 +203,34 @@ def convertbits(value, n, totype, checkbit):
Direction.InOut: InOut
}[T.direction](Bit)
- value = totype[len(Ts), T](ts)
+ value = totype[len(Ts), T](ts, name=name)
if n is not None and len(value) < n:
+ # TODO(leonardt): The extended value isn't named, but perhaps we'd like to move
+ # to an explicit convert + extend rather than doing them in a single
+ # operation? If so, then we could provide the same name interface for
+ # the extension operators.
value = value.ext_to(n)
return value
-def array(value, n=None):
- return convertbits(value, n, Array, False)
+def array(value, n=None, name=None):
+ return convertbits(value, n, Array, False, name=name)
-def bits(value, n=None):
- return convertbits(value, n, Bits, True)
+def bits(value, n=None, name=None):
+ return convertbits(value, n, Bits, True, name=name)
-def uint(value, n=None):
- return convertbits(value, n, UInt, True)
+def uint(value, n=None, name=None):
+ return convertbits(value, n, UInt, True, name=name)
-def sint(value, n=None):
- return convertbits(value, n, SInt, True)
+def sint(value, n=None, name=None):
+ return convertbits(value, n, SInt, True, name=name)
-def bfloat(value, n=None):
- return convertbits(value, n, BFloat, True)
+def bfloat(value, n=None, name=None):
+ return convertbits(value, n, BFloat, True, name=name)
def concat(*args):
diff --git a/magma/value_utils.py b/magma/value_utils.py
index 9c745c17..4941fa34 100644
--- a/magma/value_utils.py
+++ b/magma/value_utils.py
@@ -103,3 +103,20 @@ def _make_selector_impl(value, child):
def make_selector(value):
return _make_selector_impl(value, None)
+
+
+class _FillVisitor(ValueVisitor):
+ def __init__(self, fill_value: bool):
+ self._fill_value = fill_value
+
+ def visit_Digital(self, value):
+ value @= self._fill_value
+
+ def visit_Bits(self, value):
+ size = len(value)
+ fill_value = 0 if not self._fill_value else (1 << size) - 1
+ value @= fill_value
+
+
+def fill(value, fill_value: bool):
+ _FillVisitor(fill_value).visit(value)
|
phanrahan/magma
|
2dda9c846a2d9a7b37b818a7e47f7c78bd31baba
|
diff --git a/tests/test_conversion.py b/tests/test_conversion.py
index 145525b1..323729e8 100644
--- a/tests/test_conversion.py
+++ b/tests/test_conversion.py
@@ -84,3 +84,19 @@ def test_bits_upcast(T):
def test_bit_upcast():
x = m.Enable()
assert type(m.bit(x)) is m.Bit
+
+
[email protected](
+ 'T,conversion,expected_T',
+ [
+ (m.Bit, m.clock, m.Clock),
+ (m.Bits[4], m.uint, m.UInt),
+ (m.UInt[4], m.bits, m.Bits),
+ (m.Reset, m.bit, m.Bit)
+ ]
+)
+def test_name(T, conversion, expected_T):
+ x = T(name="x")
+ y = conversion(x, name="y")
+ assert str(y) == 'y'
+ assert isinstance(y, expected_T)
diff --git a/tests/test_value_utils.py b/tests/test_value_utils.py
index bbaeae78..7cad99c2 100644
--- a/tests/test_value_utils.py
+++ b/tests/test_value_utils.py
@@ -1,6 +1,13 @@
+import pytest
+
import magma as m
-from magma.value_utils import (ValueVisitor, ArraySelector, TupleSelector,
- make_selector)
+from magma.value_utils import (
+ ValueVisitor,
+ ArraySelector,
+ TupleSelector,
+ make_selector,
+ fill,
+)
class _Prod(m.Product):
@@ -67,3 +74,26 @@ def test_selector():
assert str(selector) == "[0].x[0]"
assert selector.select(_Foo.I) is _Foo.I[0].x[0]
+
+
[email protected]("fill_value", (True, False))
+def test_fill(fill_value):
+ S = m.AnonProduct[{"x": m.Bits[8], "y": m.Bit}]
+ T = m.AnonProduct[{"s": S, "u": m.Array[4, m.Bits[6]]}]
+
+ t = T()
+ fill(t, fill_value)
+
+ value = t.value()
+ assert value is not None
+
+ assert value.s.const()
+ assert value.s.x.const()
+ assert value.s.y.const()
+ assert int(value.s.x) == (0 if not fill_value else 255)
+ assert int(value.s.y) == (0 if not fill_value else 1)
+
+ assert value.u.const()
+ for u_i in value.u:
+ assert u_i.const()
+ assert int(u_i) == (0 if not fill_value else 63)
|
[Types] Add fill (with 0 or 1) function for all types
|
0.0
|
2dda9c846a2d9a7b37b818a7e47f7c78bd31baba
|
[
"tests/test_conversion.py::test_concat_basic",
"tests/test_conversion.py::test_concat_bit",
"tests/test_conversion.py::test_ext",
"tests/test_conversion.py::test_concat_type_error",
"tests/test_conversion.py::test_convert_extend[uint]",
"tests/test_conversion.py::test_convert_extend[sint]",
"tests/test_conversion.py::test_concat_output_type[Targs0-Bits]",
"tests/test_conversion.py::test_concat_output_type[Targs1-SInt]",
"tests/test_conversion.py::test_concat_output_type[Targs2-UInt]",
"tests/test_conversion.py::test_concat_output_type[Targs3-Bits]",
"tests/test_conversion.py::test_concat_output_type[Targs4-Bits]",
"tests/test_conversion.py::test_bits_upcast[UInt[(4,",
"tests/test_conversion.py::test_bits_upcast[SInt[(4,",
"tests/test_conversion.py::test_bit_upcast",
"tests/test_conversion.py::test_name[Bit-clock-Clock]",
"tests/test_conversion.py::test_name[Bits[(4,",
"tests/test_conversion.py::test_name[UInt[(4,",
"tests/test_conversion.py::test_name[Reset-bit-Bit]",
"tests/test_value_utils.py::test_value_visitor",
"tests/test_value_utils.py::test_selector",
"tests/test_value_utils.py::test_fill[True]",
"tests/test_value_utils.py::test_fill[False]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-10 18:35:36+00:00
|
mit
| 4,533 |
|
phfaist__pylatexenc-4
|
diff --git a/pylatexenc/latex2text.py b/pylatexenc/latex2text.py
index 874ae8d..da2694e 100644
--- a/pylatexenc/latex2text.py
+++ b/pylatexenc/latex2text.py
@@ -23,11 +23,17 @@
#
+from __future__ import print_function, absolute_import
import os
import re
import unicodedata
-import latexwalker
import logging
+import sys
+if sys.version_info.major > 2:
+ def unicode(string): return string
+ basestring = str
+
+from pylatexenc import latexwalker
logger = logging.getLogger(__name__);
@@ -498,7 +504,7 @@ class LatexNodes2Text(object):
#
if (len(n.nodeargs) != 1):
- logger.warning(ur"Expected exactly one argument for '\input' ! Got = %r", n.nodeargs)
+ logger.warning(u"Expected exactly one argument for '\\input' ! Got = %r", n.nodeargs)
inputtex = self.read_input_file(self.nodelist_to_text([n.nodeargs[0]]).strip())
@@ -663,17 +669,17 @@ if __name__ == '__main__':
import fileinput
- print "Please type some latex text (Ctrl+D twice to stop) ..."
+ print("Please type some latex text (Ctrl+D twice to stop) ...")
latex = ''
for line in fileinput.input():
latex += line;
- print '\n--- WORDS ---\n'
- print latex2text(latex.decode('utf-8')#, keep_inline_math=True
- ).encode('utf-8')
- print '\n-------------\n'
+ print('\n--- WORDS ---\n')
+ print(latex2text(latex.decode('utf-8')#, keep_inline_math=True
+ ).encode('utf-8'))
+ print('\n-------------\n')
except:
import pdb;
@@ -681,8 +687,8 @@ if __name__ == '__main__':
import sys;
(exc_type, exc_value, exc_traceback) = sys.exc_info()
- print "\nEXCEPTION: " + unicode(sys.exc_value) + "\n"
-
+ print("\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n")
+
pdb.post_mortem()
diff --git a/pylatexenc/latexencode.py b/pylatexenc/latexencode.py
index 0b54878..98b87fb 100644
--- a/pylatexenc/latexencode.py
+++ b/pylatexenc/latexencode.py
@@ -24,8 +24,13 @@
#
+from __future__ import print_function, absolute_import
import unicodedata;
import logging
+import sys
+if sys.version_info.major > 2:
+ def unicode(string): return string
+ basestring = str
log = logging.getLogger(__name__)
@@ -873,20 +878,20 @@ if __name__ == '__main__':
import fileinput
- print "Please type some unicode text (Ctrl+D twice to stop) ..."
+ print("Please type some unicode text (Ctrl+D twice to stop) ...")
latex = ''
for line in fileinput.input():
latex += line;
- print '\n--- LATEX ---\n'
- print utf8tolatex(latex.decode('utf-8')).encode('utf-8')
- print '\n-------------\n'
+ print('\n--- LATEX ---\n')
+ print(utf8tolatex(latex.decode('utf-8')).encode('utf-8'))
+ print('\n-------------\n')
except:
import pdb;
import sys;
- print "\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n"
+ print("\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n")
pdb.post_mortem()
diff --git a/pylatexenc/latexwalker.py b/pylatexenc/latexwalker.py
index eef3e4b..7fcbe3f 100644
--- a/pylatexenc/latexwalker.py
+++ b/pylatexenc/latexwalker.py
@@ -23,12 +23,16 @@
#
+from __future__ import print_function, absolute_import
import logging
logger = logging.getLogger(__name__)
import re
from collections import namedtuple
-
+import sys
+if sys.version_info.major > 2:
+ def unicode(string): return string
+ basestring = str
class LatexWalkerError(Exception):
@@ -1223,9 +1227,9 @@ def disp_node(n, indent=0, context='* ', skip_group=False):
title = '\\begin{%s}' %(n.envname)
iterchildren.append(('* ', n.nodelist, False));
else:
- print "UNKNOWN NODE TYPE: %s"%(n.nodeType().__name__)
+ print("UNKNOWN NODE TYPE: %s"%(n.nodeType().__name__))
- print ' '*indent + context + title + ' '+comment
+ print(' '*indent + context + title + ' '+comment)
for context, nodelist, skip in iterchildren:
for nn in nodelist:
@@ -1252,18 +1256,18 @@ if __name__ == '__main__':
(nodes, pos, llen) = get_latex_nodes(latex);
- print '\n--- NODES ---\n'
- print repr(nodes);
- print '\n-------------\n'
+ print('\n--- NODES ---\n')
+ print(repr(nodes))
+ print('\n-------------\n')
- print '\n--- NODES ---\n'
+ print('\n--- NODES ---\n')
for n in nodes:
disp_node(n)
- print '\n-------------\n'
+ print('\n-------------\n')
except:
import pdb;
import sys;
- print "\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n"
+ print("\nEXCEPTION: " + unicode(sys.exc_info()[1]) + "\n")
pdb.post_mortem()
|
phfaist/pylatexenc
|
9919400250e204e7990821210c8c0035bede2eaa
|
diff --git a/test/test_latexwalker.py b/test/test_latexwalker.py
index 5acf7d3..2be6f0f 100644
--- a/test/test_latexwalker.py
+++ b/test/test_latexwalker.py
@@ -1,4 +1,8 @@
import unittest
+import sys
+if sys.version_info.major > 2:
+ def unicode(string): return string
+ basestring = str
from pylatexenc.latexwalker import (
MacrosDef, LatexWalker, LatexToken, LatexCharsNode, LatexGroupNode, LatexCommentNode,
|
ur"Expected exactly (...)" string gives SyntaxError: invalid syntax
Got syntax error when importing ```pylatexenc.latex2text``` module (Python 3.6, Mac, Anaconda3 distrubution):
```
import pylatexenc.latex2text
File "/Users/rasmus/anaconda/envs/tts/lib/python3.6/site-packages/pylatexenc/latex2text.py", line 501
logger.warning(ur"Expected exactly one argument for '\input' ! Got = %r", n.nodeargs)
^
SyntaxError: invalid syntax
```
Python 3.5+ does not support `ur` prefix. To fix, use either `u` or `r` string but not both.
|
0.0
|
9919400250e204e7990821210c8c0035bede2eaa
|
[
"test/test_latexwalker.py::TestLatexWalker::test_errors",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_braced_group",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_environment",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_expression",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_maybe_optional_arg",
"test/test_latexwalker.py::TestLatexWalker::test_get_latex_nodes",
"test/test_latexwalker.py::TestLatexWalker::test_get_token"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-02-16 22:40:59+00:00
|
mit
| 4,534 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.