| Column | Type / stats |
|---|---|
| `instance_id` | string, lengths 10–57 |
| `patch` | string, lengths 261–37.7k |
| `repo` | string, lengths 7–53 |
| `base_commit` | string, length 40 |
| `hints_text` | string, 301 classes |
| `test_patch` | string, lengths 212–2.22M |
| `problem_statement` | string, lengths 23–37.7k |
| `version` | string, 1 class |
| `environment_setup_commit` | string, length 40 |
| `FAIL_TO_PASS` | list, lengths 1–4.94k |
| `PASS_TO_PASS` | list, lengths 0–7.82k |
| `meta` | dict |
| `created_at` | string, length 25 |
| `license` | string, 8 classes |
| `__index_level_0__` | int64, 0–6.41k |

Each record below lists these fields in this order, with `|` lines separating the fields.
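For orientation, a loading sketch under stated assumptions: the dataset id below is a placeholder (it is not recoverable from this dump), and whether `FAIL_TO_PASS` arrives as a Python list or a JSON-encoded string depends on how the dataset was published.

```
# Hypothetical loading sketch for a SWE-bench-style dataset with the schema
# above; "org/dataset" is a placeholder id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("org/dataset", split="train")  # placeholder id
row = ds[0]
print(row["instance_id"], row["repo"], row["license"])
# Tests that the gold patch must flip from failing to passing:
print(row["FAIL_TO_PASS"])
```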
taverntesting__tavern-851
|
diff --git a/tavern/_core/exceptions.py b/tavern/_core/exceptions.py
index d8c78b6..803abe0 100644
--- a/tavern/_core/exceptions.py
+++ b/tavern/_core/exceptions.py
@@ -114,7 +114,7 @@ class InvalidFormattedJsonError(TavernException):
"""Tried to use the magic json format tag in an invalid way"""
-class InvalidExtBlockException(TavernException):
+class MisplacedExtBlockException(TavernException):
"""Tried to use the '$ext' block in a place it is no longer valid to use it"""
def __init__(self, block) -> None:
diff --git a/tavern/_core/extfunctions.py b/tavern/_core/extfunctions.py
index c7b3325..e7b534e 100644
--- a/tavern/_core/extfunctions.py
+++ b/tavern/_core/extfunctions.py
@@ -123,6 +123,11 @@ def get_wrapped_create_function(ext: Mapping):
def _get_ext_values(ext: Mapping):
+ if not isinstance(ext, Mapping):
+ raise exceptions.InvalidExtFunctionError(
+ "ext block should be a dict, but it was a {}".format(type(ext))
+ )
+
args = ext.get("extra_args") or ()
kwargs = ext.get("extra_kwargs") or {}
try:
@@ -145,14 +150,23 @@ def update_from_ext(request_args: dict, keys_to_check: List[str]) -> None:
"""
new_args = {}
+ logger = _getlogger()
for key in keys_to_check:
try:
- func = get_wrapped_create_function(request_args[key].pop("$ext"))
- except (KeyError, TypeError, AttributeError):
- pass
- else:
- new_args[key] = func()
+ block = request_args[key]
+ except KeyError:
+ logger.debug("No %s block", key)
+ continue
+
+ try:
+ pop = block.pop("$ext")
+ except (KeyError, AttributeError, TypeError):
+ logger.debug("No ext functions in %s block", key)
+ continue
+
+ func = get_wrapped_create_function(pop)
+ new_args[key] = func()
merged_args = deep_dict_merge(request_args, new_args)
diff --git a/tavern/_plugins/mqtt/request.py b/tavern/_plugins/mqtt/request.py
index db7eae7..0a9de87 100644
--- a/tavern/_plugins/mqtt/request.py
+++ b/tavern/_plugins/mqtt/request.py
@@ -1,7 +1,7 @@
import functools
import json
import logging
-from typing import Mapping
+from typing import Dict
from box.box import Box
@@ -16,21 +16,19 @@ from tavern.request import BaseRequest
logger = logging.getLogger(__name__)
-def get_publish_args(rspec: Mapping, test_block_config: TestConfig) -> dict:
- """Format mqtt request args
-
- Todo:
- Anything else to do here?
- """
+def get_publish_args(rspec: Dict, test_block_config: TestConfig) -> dict:
+ """Format mqtt request args and update using ext functions"""
fspec = format_keys(rspec, test_block_config.variables)
- if "json" in rspec:
- if "payload" in rspec:
+ if "json" in fspec:
+ if "payload" in fspec:
raise exceptions.BadSchemaError(
"Can only specify one of 'payload' or 'json' in MQTT request"
)
+ update_from_ext(fspec, ["json"])
+
fspec["payload"] = json.dumps(fspec.pop("json"))
return fspec
@@ -43,15 +41,15 @@ class MQTTRequest(BaseRequest):
"""
def __init__(
- self, client: MQTTClient, rspec: Mapping, test_block_config: TestConfig
+ self, client: MQTTClient, rspec: Dict, test_block_config: TestConfig
) -> None:
expected = {"topic", "payload", "json", "qos", "retain"}
check_expected_keys(expected, rspec)
publish_args = get_publish_args(rspec, test_block_config)
- update_from_ext(publish_args, ["json"])
+ self._publish_args = publish_args
self._prepared = functools.partial(client.publish, **publish_args)
# Need to do this here because get_publish_args will modify the original
diff --git a/tavern/_plugins/mqtt/response.py b/tavern/_plugins/mqtt/response.py
index a2e362b..73513f8 100644
--- a/tavern/_plugins/mqtt/response.py
+++ b/tavern/_plugins/mqtt/response.py
@@ -335,7 +335,7 @@ class _MessageVerifier:
json_payload = True
if payload.pop("$ext", None):
- raise exceptions.InvalidExtBlockException(
+ raise exceptions.MisplacedExtBlockException(
"json",
)
elif "payload" in expected:
diff --git a/tavern/_plugins/rest/response.py b/tavern/_plugins/rest/response.py
index 97bc494..ca54b11 100644
--- a/tavern/_plugins/rest/response.py
+++ b/tavern/_plugins/rest/response.py
@@ -218,7 +218,7 @@ class RestResponse(BaseResponse):
if isinstance(expected_block, dict):
if expected_block.pop("$ext", None):
- raise exceptions.InvalidExtBlockException(
+ raise exceptions.MisplacedExtBlockException(
blockname,
)
diff --git a/tavern/response.py b/tavern/response.py
index f9eba9f..8bee7a6 100644
--- a/tavern/response.py
+++ b/tavern/response.py
@@ -139,7 +139,7 @@ class BaseResponse:
if isinstance(block, dict):
check_ext_functions(block.get("$ext", None))
if nfuncs != len(self.validate_functions):
- raise exceptions.InvalidExtBlockException(
+ raise exceptions.MisplacedExtBlockException(
name,
)
|
taverntesting/tavern
|
7e624698ad534342bfc302bb1216eeb5e214b240
|
diff --git a/example/mqtt/test_mqtt.tavern.yaml b/example/mqtt/test_mqtt.tavern.yaml
index 145d3b6..956a18a 100644
--- a/example/mqtt/test_mqtt.tavern.yaml
+++ b/example/mqtt/test_mqtt.tavern.yaml
@@ -740,3 +740,28 @@ stages:
payload: "there"
timeout: 5
qos: 1
+
+---
+
+test_name: Update an MQTT publish from an ext function
+
+includes:
+ - !include common.yaml
+
+paho-mqtt: *mqtt_spec
+
+stages:
+ - *setup_device_for_test
+
+ - name: step 1 - ping/pong
+ mqtt_publish:
+ topic: /device/{random_device_id}/echo
+ json:
+ $ext:
+ function: testing_utils:return_hello
+ mqtt_response:
+ topic: /device/{random_device_id}/echo/response
+ timeout: 3
+ qos: 1
+ json:
+ hello: there
diff --git a/example/mqtt/testing_utils.py b/example/mqtt/testing_utils.py
index 70021ab..f483ca9 100644
--- a/example/mqtt/testing_utils.py
+++ b/example/mqtt/testing_utils.py
@@ -3,5 +3,5 @@ def message_says_hello(msg):
assert msg.payload.get("message") == "hello world"
-def return_hello(_):
+def return_hello(_=None):
return {"hello": "there"}
diff --git a/tests/unit/test_mqtt.py b/tests/unit/test_mqtt.py
index b9603da..1d55b06 100644
--- a/tests/unit/test_mqtt.py
+++ b/tests/unit/test_mqtt.py
@@ -1,3 +1,4 @@
+from typing import Dict
from unittest.mock import MagicMock, Mock, patch
import paho.mqtt.client as paho
@@ -18,18 +19,19 @@ def test_host_required():
MQTTClient(**args)
-class TestClient:
- @pytest.fixture(name="fake_client")
- def fix_fake_client(self):
- args = {"connect": {"host": "localhost"}}
[email protected](name="fake_client")
+def fix_fake_client():
+ args = {"connect": {"host": "localhost"}}
+
+ mqtt_client = MQTTClient(**args)
- mqtt_client = MQTTClient(**args)
+ mqtt_client._subscribed[2] = _Subscription("abc")
+ mqtt_client._subscription_mappings["abc"] = 2
- mqtt_client._subscribed[2] = _Subscription("abc")
- mqtt_client._subscription_mappings["abc"] = 2
+ return mqtt_client
- return mqtt_client
+class TestClient:
def test_no_queue(self, fake_client):
"""Trying to fetch from a nonexistent queue raised exception"""
@@ -192,3 +194,33 @@ class TestSubscription:
MQTTClient._on_subscribe(mock_client, "abc", {}, 123, 0)
assert mock_client._subscribed == {}
+
+
+class TestExtFunctions:
+ @pytest.fixture()
+ def basic_mqtt_request_args(self) -> Dict:
+ return {
+ "topic": "/a/b/c",
+ }
+
+ def test_basic(self, fake_client, basic_mqtt_request_args, includes):
+ MQTTRequest(fake_client, basic_mqtt_request_args, includes)
+
+ def test_ext_function_bad(self, fake_client, basic_mqtt_request_args, includes):
+ basic_mqtt_request_args["json"] = {"$ext": "kk"}
+
+ with pytest.raises(exceptions.InvalidExtFunctionError):
+ MQTTRequest(fake_client, basic_mqtt_request_args, includes)
+
+ def test_ext_function_good(self, fake_client, basic_mqtt_request_args, includes):
+ basic_mqtt_request_args["json"] = {
+ "$ext": {
+ "function": "operator:add",
+ "extra_args": (1, 2),
+ }
+ }
+
+ m = MQTTRequest(fake_client, basic_mqtt_request_args, includes)
+
+ assert "payload" in m._publish_args
+ assert m._publish_args["payload"] == "3"
|
Unable to use external function in MQTT publish
I tried using an external function in an MQTT publish request, and the function wasn't getting evaluated.
My request looks like this:
```
- id: publish_thing_1
name: Publish thing 1
mqtt_publish:
topic: &ping_topic '/device/123/ping'
qos: 1
json:
$ext:
function: utils.testing_utils:my_function
thing_1: abc
mqtt_response:
topic: *ping_topic
json:
header: test
thing_1: abc
timeout: 5
qos: 1
```
It looks like the line below in [request.py](https://github.com/taverntesting/tavern/blob/master/tavern/_plugins/mqtt/request.py)
```
update_from_ext(publish_args, ["json"], test_block_config)
```
should be
```
update_from_ext(publish_args, ["payload"], test_block_config)
```
instead, since publish_args looks like this:
```
{'topic': '/device/123/ping', 'qos': 1, 'payload': '{"$ext": {"function": "utils.testing_utils:my_function"}, "thing_1": "abc"}'}
```
Note that the `payload` value is a string, which prevents external function evaluation even after I do the `update_from_ext` change. Before I go down the rabbit hole too much, I wanted to confirm that I've configured the request properly, and that this feature is expected to work after this [PR](https://github.com/taverntesting/tavern/pull/620).
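For reference, a minimal self-contained sketch of why the ordering matters, assuming only the `$ext` convention shown above (`resolve_ext` below is a stand-in, not Tavern's `get_wrapped_create_function`/`update_from_ext`): the `$ext` block must be evaluated while `json` is still a dict, before `json.dumps()` turns it into the `payload` string, which is exactly where the patch above moves the call.

```
# Minimal sketch, assuming the same "$ext" shape as Tavern:
# {"$ext": {"function": "module:attr", "extra_args": [...]}}.
import importlib
import json

def resolve_ext(block):
    """Replace a {"$ext": ...} marker with the referenced function's result."""
    if isinstance(block, dict) and "$ext" in block:
        spec = block.pop("$ext")
        module_name, attr = spec["function"].split(":")
        func = getattr(importlib.import_module(module_name), attr)
        return func(*spec.get("extra_args", ()), **spec.get("extra_kwargs", {}))
    return block

publish_args = {
    "topic": "/device/123/ping",
    "json": {"$ext": {"function": "operator:add", "extra_args": (1, 2)}},
}
# Resolve the $ext block first, then serialise: the order the patch enforces.
publish_args["payload"] = json.dumps(resolve_ext(publish_args.pop("json")))
assert publish_args["payload"] == "3"  # mirrors test_ext_function_good above
```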
|
0.0
|
7e624698ad534342bfc302bb1216eeb5e214b240
|
[
"tests/unit/test_mqtt.py::TestExtFunctions::test_ext_function_bad",
"tests/unit/test_mqtt.py::TestExtFunctions::test_ext_function_good"
] |
[
"tests/unit/test_mqtt.py::test_host_required",
"tests/unit/test_mqtt.py::TestClient::test_no_queue",
"tests/unit/test_mqtt.py::TestClient::test_no_message",
"tests/unit/test_mqtt.py::TestClient::test_message_queued",
"tests/unit/test_mqtt.py::TestClient::test_context_connection_failure",
"tests/unit/test_mqtt.py::TestClient::test_context_connection_success",
"tests/unit/test_mqtt.py::TestClient::test_assert_message_published",
"tests/unit/test_mqtt.py::TestClient::test_assert_message_published_unknown_err",
"tests/unit/test_mqtt.py::TestTLS::test_missing_cert_gives_error",
"tests/unit/test_mqtt.py::TestTLS::test_disabled_tls",
"tests/unit/test_mqtt.py::TestTLS::test_invalid_tls_ver",
"tests/unit/test_mqtt.py::TestRequests::test_unknown_fields",
"tests/unit/test_mqtt.py::TestRequests::test_missing_format",
"tests/unit/test_mqtt.py::TestRequests::test_correct_format",
"tests/unit/test_mqtt.py::TestSubscription::test_handles_subscriptions",
"tests/unit/test_mqtt.py::TestSubscription::test_no_subscribe_on_err",
"tests/unit/test_mqtt.py::TestSubscription::test_no_subscribe_on_unrecognised_suback",
"tests/unit/test_mqtt.py::TestExtFunctions::test_basic"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-16 12:53:45+00:00
|
mit
| 5,835 |
|
taxprofiler__taxpasta-132
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d4c4a92..0678f92 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
`merge` commands, which inserts a new column `rank_lineage` to results that
contains semi-colon-separated strings with the ranks (#130).
+### Changed
+
+- Reversed the order of lineages printed to output files (#131).
+
## [0.4.1] - (2023-07-13)
### Fixed
diff --git a/src/taxpasta/infrastructure/domain/service/taxopy_taxonomy_service.py b/src/taxpasta/infrastructure/domain/service/taxopy_taxonomy_service.py
index 9635340..4141e9f 100644
--- a/src/taxpasta/infrastructure/domain/service/taxopy_taxonomy_service.py
+++ b/src/taxpasta/infrastructure/domain/service/taxopy_taxonomy_service.py
@@ -73,7 +73,7 @@ class TaxopyTaxonomyService(TaxonomyService):
taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db)
except TaxidError:
return None
- return taxon.name_lineage
+ return list(reversed(taxon.name_lineage))
def get_taxon_identifier_lineage(self, taxonomy_id: int) -> Optional[List[int]]:
"""Return the lineage of a given taxonomy identifier as identifiers."""
@@ -81,7 +81,7 @@ class TaxopyTaxonomyService(TaxonomyService):
taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db)
except TaxidError:
return None
- return taxon.taxid_lineage
+ return list(reversed(taxon.taxid_lineage))
def get_taxon_rank_lineage(self, taxonomy_id: int) -> Optional[List[str]]:
"""Return the lineage of a given taxonomy identifier as ranks."""
@@ -89,7 +89,7 @@ class TaxopyTaxonomyService(TaxonomyService):
taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db)
except TaxidError:
return None
- return list(taxon.rank_name_dictionary.keys())
+ return list(reversed(taxon.rank_name_dictionary.keys()))
def add_name(self, table: DataFrame[ResultTable]) -> DataFrame[ResultTable]:
"""Add a column for the taxon name to the given table."""
@@ -123,11 +123,10 @@ class TaxopyTaxonomyService(TaxonomyService):
def _name_lineage_as_str(self, taxonomy_id: int) -> Optional[str]:
"""Return the lineage of a taxon as concatenated names."""
- try:
- taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db)
- except TaxidError:
+ if lineage := self.get_taxon_name_lineage(taxonomy_id):
+ return ";".join(lineage)
+ else:
return None
- return ";".join(taxon.name_lineage)
def add_identifier_lineage(
self, table: DataFrame[ResultTable]
@@ -143,11 +142,10 @@ class TaxopyTaxonomyService(TaxonomyService):
def _taxid_lineage_as_str(self, taxonomy_id: int) -> Optional[str]:
"""Return the lineage of a taxon as concatenated identifiers."""
- try:
- taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db)
- except TaxidError:
+ if lineage := self.get_taxon_identifier_lineage(taxonomy_id):
+ return ";".join(str(tax_id) for tax_id in lineage)
+ else:
return None
- return ";".join([str(tax_id) for tax_id in taxon.taxid_lineage])
def add_rank_lineage(self, table: DataFrame[ResultTable]) -> DataFrame[ResultTable]:
"""Add a column for the taxon lineage as ranks to the given table."""
@@ -161,11 +159,10 @@ class TaxopyTaxonomyService(TaxonomyService):
def _rank_lineage_as_str(self, taxonomy_id: int) -> Optional[str]:
"""Return the rank lineage of a taxon as concatenated identifiers."""
- try:
- taxon = taxopy.Taxon(taxid=taxonomy_id, taxdb=self._tax_db)
- except TaxidError:
+ if lineage := self.get_taxon_rank_lineage(taxonomy_id):
+ return ";".join(lineage)
+ else:
return None
- return ";".join(taxon.rank_name_dictionary.keys())
def summarise_at(
self, profile: DataFrame[StandardProfile], rank: str
|
taxprofiler/taxpasta
|
d3d03bfe87876b8fe81bc4b9a80775bb5250ec94
|
diff --git a/tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py b/tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py
index 5109384..1c3e2ab 100644
--- a/tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py
+++ b/tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py
@@ -82,14 +82,14 @@ def test_get_taxon_rank(tax_service: TaxopyTaxonomyService, tax_id: int, expecte
(
86398254,
[
- "Pseudomonadales",
- "Gammaproteobacteria",
- "Proteobacteria",
- "Bacteria",
"root",
+ "Bacteria",
+ "Proteobacteria",
+ "Gammaproteobacteria",
+ "Pseudomonadales",
],
),
- (1199096325, ["Saccharomycetes", "Ascomycota", "Eukaryota", "root"]),
+ (1199096325, ["root", "Eukaryota", "Ascomycota", "Saccharomycetes"]),
],
)
def test_get_taxon_name_lineage(
@@ -104,8 +104,8 @@ def test_get_taxon_name_lineage(
[
(1, [1]),
(42, None),
- (86398254, [86398254, 329474883, 1641076285, 609216830, 1]),
- (1199096325, [1199096325, 432158898, 476817098, 1]),
+ (86398254, [1, 609216830, 1641076285, 329474883, 86398254]),
+ (1199096325, [1, 476817098, 432158898, 1199096325]),
],
)
def test_get_taxon_identifier_lineage(
@@ -120,8 +120,8 @@ def test_get_taxon_identifier_lineage(
[
(1, []),
(42, None),
- (86398254, ["order", "class", "phylum", "superkingdom"]),
- (1199096325, ["class", "phylum", "superkingdom"]),
+ (86398254, ["superkingdom", "phylum", "class", "order"]),
+ (1199096325, ["superkingdom", "phylum", "class"]),
],
)
def test_get_taxon_rank_lineage(
@@ -145,9 +145,9 @@ def test_get_taxon_rank_lineage(
[
"root",
None,
- "Pseudomonadales;Gammaproteobacteria;Proteobacteria;"
- "Bacteria;root",
- "Saccharomycetes;Ascomycota;Eukaryota;root",
+ "root;Bacteria;Proteobacteria;Gammaproteobacteria;"
+ "Pseudomonadales",
+ "root;Eukaryota;Ascomycota;Saccharomycetes",
],
),
]
@@ -177,8 +177,8 @@ def test_add_name_lineage(
[
"1",
None,
- "86398254;329474883;1641076285;609216830;1",
- "1199096325;432158898;476817098;1",
+ "1;609216830;1641076285;329474883;86398254",
+ "1;476817098;432158898;1199096325",
],
),
]
@@ -206,10 +206,10 @@ def test_add_identifier_lineage(
(
"rank_lineage",
[
- "",
None,
- "order;class;phylum;superkingdom",
- "class;phylum;superkingdom",
+ None,
+ "superkingdom;phylum;class;order",
+ "superkingdom;phylum;class",
],
),
]
|
[Feature] Reverse the order of the current lineages
### Checklist
- [X] There are [no similar issues or pull requests](https://github.com/taxprofiler/taxpasta/issues) for this yet.
### Problem
Lineages are currently printed in order from the taxon of interest up to the higher ranks (until root). This is in contrast to all other tools, which print lineages from the highest rank (superkingdom) to the most specific.
### Solution
Reverse the order of lineages.
### Alternatives
One can find arguments for keeping either order, but the reversed (root-first) order is the _de facto_ standard in the field.
### Anything else?
_No response_
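A short sketch of the proposed change, using the lineage from the test data above: taxopy's `*_lineage` properties run from the taxon of interest up to the root, so a plain `reversed()` yields the root-first order other tools emit.

```
# Lineage as taxopy returns it: most specific taxon first, root last.
lineage = ["Pseudomonadales", "Gammaproteobacteria", "Proteobacteria", "Bacteria", "root"]

print(";".join(reversed(lineage)))
# root;Bacteria;Proteobacteria;Gammaproteobacteria;Pseudomonadales
```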
|
0.0
|
d3d03bfe87876b8fe81bc4b9a80775bb5250ec94
|
[
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name_lineage[86398254-expected2]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name_lineage[1199096325-expected3]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_identifier_lineage[86398254-expected2]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_identifier_lineage[1199096325-expected3]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank_lineage[86398254-expected2]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank_lineage[1199096325-expected3]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_add_name_lineage[result0-expected0]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_add_identifier_lineage[result0-expected0]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_add_rank_lineage[result0-expected0]"
] |
[
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[1-root]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[42-None]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[86398254-Pseudomonadales]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[432158898-Ascomycota]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[492356122-Saccharomyces",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[1945799576-Escherichia",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name[1887621118-Pseudomonas",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[1-no",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[42-None]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[476817098-superkingdom]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[432158898-phylum]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[329474883-class]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[86398254-order]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[87250111-family]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[933264868-genus]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank[1887621118-species]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name_lineage[1-expected0]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_name_lineage[42-None]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_identifier_lineage[1-expected0]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_identifier_lineage[42-None]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank_lineage[1-expected0]",
"tests/unit/infrastructure/domain/service/test_taxopy_taxonomy_service.py::test_get_taxon_rank_lineage[42-None]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-22 13:49:24+00:00
|
apache-2.0
| 5,836 |
|
tcalmant__ipopo-120
|
diff --git a/pelix/ipopo/handlers/requiresvarfilter.py b/pelix/ipopo/handlers/requiresvarfilter.py
index a9895e0..133f51b 100644
--- a/pelix/ipopo/handlers/requiresvarfilter.py
+++ b/pelix/ipopo/handlers/requiresvarfilter.py
@@ -239,6 +239,9 @@ class _VariableFilterMixIn:
self.stop()
self.start()
+ # Force bindings update
+ self._ipopo_instance.update_bindings()
+
for svc_ref in self.get_bindings():
# Check if the current reference matches the filter
if not self.requirement.filter.matches(
|
tcalmant/ipopo
|
1d0add361ca219da8fdf72bb9ba8cb0ade01ad2f
|
diff --git a/tests/ipopo/issue_119_bundle.py b/tests/ipopo/issue_119_bundle.py
new file mode 100644
index 0000000..669ba69
--- /dev/null
+++ b/tests/ipopo/issue_119_bundle.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# -- Content-Encoding: UTF-8 --
+"""
+Issue 119 (late binding issue on RequiresVarFilter) use case
+"""
+
+from pelix.ipopo.decorators import (
+ ComponentFactory,
+ Provides,
+ Property,
+ Requires,
+ RequiresVarFilter,
+)
+
+
+@ComponentFactory("provider-factory")
+@Property("providing", "providing", None)
+@Provides("required-service")
+class Provider:
+ def __init__(self):
+ self.providing = None
+
+
+@ComponentFactory("varservice-factory")
+@Property("search", "search")
+@RequiresVarFilter(
+ "depends", "required-service", spec_filter="(prop={search})"
+)
+class VarcConsumer:
+ def __init__(self):
+ self.depends = None
+ self.search = None
diff --git a/tests/ipopo/test_requires_varfilter.py b/tests/ipopo/test_requires_varfilter.py
index 7ed07ad..e54f16d 100644
--- a/tests/ipopo/test_requires_varfilter.py
+++ b/tests/ipopo/test_requires_varfilter.py
@@ -9,6 +9,9 @@ Tests the iPOPO @RequiresVarFilter decorator.
# Standard library
import random
import string
+
+from pelix.ipopo.instance import StoredInstance
+
try:
import unittest2 as unittest
except ImportError:
@@ -38,6 +41,7 @@ class RequiresVarFilterTest(unittest.TestCase):
"""
Tests the "requires variable filter" handler behavior
"""
+
def setUp(self):
"""
Called before each test. Initiates a framework.
@@ -61,22 +65,30 @@ class RequiresVarFilterTest(unittest.TestCase):
assert isinstance(context, BundleContext)
# Prepare random string values
- random_static_1 = ''.join(random.choice(string.ascii_letters)
- for _ in range(50))
- random_static_2 = ''.join(random.choice(string.ascii_letters)
- for _ in range(50))
+ random_static_1 = "".join(
+ random.choice(string.ascii_letters) for _ in range(50)
+ )
+ random_static_2 = "".join(
+ random.choice(string.ascii_letters) for _ in range(50)
+ )
# Assert that the service is not yet available
- self.assertIsNone(context.get_service_reference(IEchoService),
- "Service is already registered")
+ self.assertIsNone(
+ context.get_service_reference(IEchoService),
+ "Service is already registered",
+ )
# Instantiate the components
consumer_single = self.ipopo.instantiate(
- module.FACTORY_REQUIRES_VAR_FILTER, NAME_A,
- {"static": random_static_1})
+ module.FACTORY_REQUIRES_VAR_FILTER,
+ NAME_A,
+ {"static": random_static_1},
+ )
consumer_multi = self.ipopo.instantiate(
- module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, NAME_B,
- {"static": random_static_1})
+ module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE,
+ NAME_B,
+ {"static": random_static_1},
+ )
consumers = (consumer_single, consumer_multi)
# Force the "answer" property to an int
@@ -85,46 +97,60 @@ class RequiresVarFilterTest(unittest.TestCase):
# Component must be invalid
for consumer in consumers:
- self.assertListEqual([IPopoEvent.INSTANTIATED], consumer.states,
- "Invalid component states: {0}"
- .format(consumer.states))
+ self.assertListEqual(
+ [IPopoEvent.INSTANTIATED],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
# Instantiate a service, matching the filter
svc1 = object()
context.register_service(
- IEchoService, svc1,
- {"s": random_static_1, "a": consumer_single.answer})
+ IEchoService,
+ svc1,
+ {"s": random_static_1, "a": consumer_single.answer},
+ )
# The consumer must have been validated
for consumer in consumers:
self.assertListEqual(
- [IPopoEvent.BOUND, IPopoEvent.VALIDATED], consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ [IPopoEvent.BOUND, IPopoEvent.VALIDATED],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
self.assertIs(consumer_single.service, svc1, "Wrong service injected")
- self.assertListEqual(consumer_multi.service, [svc1],
- "Wrong service injected")
+ self.assertListEqual(
+ consumer_multi.service, [svc1], "Wrong service injected"
+ )
# New service, still matching
svc2 = object()
reg2 = context.register_service(
- IEchoService, svc2,
- {"s": random_static_1, "a": consumer_single.answer})
+ IEchoService,
+ svc2,
+ {"s": random_static_1, "a": consumer_single.answer},
+ )
# The single consumer must not have been modified
- self.assertListEqual([], consumer_single.states,
- "Invalid component states: {0}"
- .format(consumer_single.states))
+ self.assertListEqual(
+ [],
+ consumer_single.states,
+ "Invalid component states: {0}".format(consumer_single.states),
+ )
self.assertIs(consumer_single.service, svc1, "Wrong service injected")
# The aggregate consumer must have been modified
- self.assertListEqual([IPopoEvent.BOUND], consumer_multi.states,
- "Invalid component states: {0}"
- .format(consumer_multi.states))
- self.assertListEqual(consumer_multi.service, [svc1, svc2],
- "Second service not injected")
+ self.assertListEqual(
+ [IPopoEvent.BOUND],
+ consumer_multi.states,
+ "Invalid component states: {0}".format(consumer_multi.states),
+ )
+ self.assertListEqual(
+ consumer_multi.service, [svc1, svc2], "Second service not injected"
+ )
# Reset states
for consumer in consumers:
@@ -134,17 +160,22 @@ class RequiresVarFilterTest(unittest.TestCase):
reg2.unregister()
# The single consumer must not have been modified
- self.assertListEqual([], consumer_single.states,
- "Invalid component states: {0}"
- .format(consumer_single.states))
+ self.assertListEqual(
+ [],
+ consumer_single.states,
+ "Invalid component states: {0}".format(consumer_single.states),
+ )
self.assertIs(consumer_single.service, svc1, "Wrong service injected")
# The aggregate consumer must have been modified
- self.assertListEqual([IPopoEvent.UNBOUND], consumer_multi.states,
- "Invalid component states: {0}"
- .format(consumer_multi.states))
- self.assertListEqual(consumer_multi.service, [svc1],
- "Second service not removed")
+ self.assertListEqual(
+ [IPopoEvent.UNBOUND],
+ consumer_multi.states,
+ "Invalid component states: {0}".format(consumer_multi.states),
+ )
+ self.assertListEqual(
+ consumer_multi.service, [svc1], "Second service not removed"
+ )
# Change the filter property to the exact same value
for consumer in consumers:
@@ -152,14 +183,17 @@ class RequiresVarFilterTest(unittest.TestCase):
consumer.change(42)
# The consumer must not have been modified
- self.assertListEqual([], consumer.states,
- "Invalid component states: {0}"
- .format(consumer.states))
+ self.assertListEqual(
+ [],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
self.assertIs(consumer_single.service, svc1, "Wrong service injected")
- self.assertListEqual(consumer_multi.service, [svc1],
- "Wrong service injected")
+ self.assertListEqual(
+ consumer_multi.service, [svc1], "Wrong service injected"
+ )
# Change the filter property to a new value
for consumer in consumers:
@@ -170,60 +204,76 @@ class RequiresVarFilterTest(unittest.TestCase):
self.assertListEqual(
[IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND],
consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ "Invalid component states: {0}".format(consumer.states),
+ )
self.assertIs(consumer.service, None, "A service is injected")
consumer.reset()
# New service, matching part of the filter
svc3 = object()
context.register_service(
- IEchoService, svc3,
- {"s": random_static_2, "a": consumer_single.answer})
+ IEchoService,
+ svc3,
+ {"s": random_static_2, "a": consumer_single.answer},
+ )
# The consumer must not have been modified
for consumer in consumers:
- self.assertListEqual([], consumer.states,
- "Invalid component states: {0}"
- .format(consumer.states))
+ self.assertListEqual(
+ [],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
self.assertIs(consumer.service, None, "A service is injected")
consumer.reset()
# New service, matching the new filer
svc4 = object()
reg4 = context.register_service(
- IEchoService, svc4,
- {"s": random_static_1, "a": consumer_single.answer})
+ IEchoService,
+ svc4,
+ {"s": random_static_1, "a": consumer_single.answer},
+ )
# The consumer must not have been modified
for consumer in consumers:
self.assertListEqual(
[IPopoEvent.BOUND, IPopoEvent.VALIDATED],
consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
self.assertIs(consumer_single.service, svc4, "Wrong service injected")
- self.assertListEqual(consumer_multi.service, [svc4],
- "Wrong service injected")
+ self.assertListEqual(
+ consumer_multi.service, [svc4], "Wrong service injected"
+ )
# New service, matching the new filer
svc5 = object()
reg5 = context.register_service(
- IEchoService, svc5,
- {"s": random_static_1, "a": consumer_single.answer})
+ IEchoService,
+ svc5,
+ {"s": random_static_1, "a": consumer_single.answer},
+ )
# The single consumer must not have been modified
- self.assertListEqual([], consumer_single.states,
- "Invalid component states: {0}"
- .format(consumer_single.states))
+ self.assertListEqual(
+ [],
+ consumer_single.states,
+ "Invalid component states: {0}".format(consumer_single.states),
+ )
self.assertIs(consumer_single.service, svc4, "Wrong service injected")
# The aggregate consumer must have been modified
- self.assertListEqual([IPopoEvent.BOUND], consumer_multi.states,
- "Invalid component states: {0}"
- .format(consumer_multi.states))
- self.assertListEqual(consumer_multi.service, [svc4, svc5],
- "Second service not injected")
+ self.assertListEqual(
+ [IPopoEvent.BOUND],
+ consumer_multi.states,
+ "Invalid component states: {0}".format(consumer_multi.states),
+ )
+ self.assertListEqual(
+ consumer_multi.service, [svc4, svc5], "Second service not injected"
+ )
# Reset states
for consumer in consumers:
@@ -236,15 +286,19 @@ class RequiresVarFilterTest(unittest.TestCase):
self.assertListEqual(
rebind_states,
consumer_single.states,
- "Invalid component states: {0}".format(consumer_single.states))
+ "Invalid component states: {0}".format(consumer_single.states),
+ )
self.assertIs(consumer_single.service, svc5, "Wrong service injected")
# The aggregate consumer must have been modified
- self.assertListEqual([IPopoEvent.UNBOUND], consumer_multi.states,
- "Invalid component states: {0}"
- .format(consumer_multi.states))
- self.assertListEqual(consumer_multi.service, [svc5],
- "First service not removed")
+ self.assertListEqual(
+ [IPopoEvent.UNBOUND],
+ consumer_multi.states,
+ "Invalid component states: {0}".format(consumer_multi.states),
+ )
+ self.assertListEqual(
+ consumer_multi.service, [svc5], "First service not removed"
+ )
# Reset states
for consumer in consumers:
@@ -255,8 +309,10 @@ class RequiresVarFilterTest(unittest.TestCase):
for consumer in consumers:
self.assertListEqual(
- [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND], consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
self.assertIs(consumer.service, None, "A service is still injected")
consumer.reset()
@@ -265,9 +321,15 @@ class RequiresVarFilterTest(unittest.TestCase):
Tests the @RequiresVarFilter handler without immediate_rebind (default)
"""
module = install_bundle(self.framework)
- self.__internal_test(module,
- [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND,
- IPopoEvent.BOUND, IPopoEvent.VALIDATED])
+ self.__internal_test(
+ module,
+ [
+ IPopoEvent.INVALIDATED,
+ IPopoEvent.UNBOUND,
+ IPopoEvent.BOUND,
+ IPopoEvent.VALIDATED,
+ ],
+ )
def test_immediate_rebind(self):
"""
@@ -276,8 +338,10 @@ class RequiresVarFilterTest(unittest.TestCase):
# Modify component factories
module = install_bundle(self.framework)
- for clazz in (module.RequiresVarFilterComponentFactory,
- module.RequiresVarFilterAggregateComponentFactory):
+ for clazz in (
+ module.RequiresVarFilterComponentFactory,
+ module.RequiresVarFilterAggregateComponentFactory,
+ ):
context = get_factory_context(clazz)
configs = context.get_handler(RequiresVarFilter.HANDLER_ID)
configs["service"].immediate_rebind = True
@@ -292,20 +356,27 @@ class RequiresVarFilterTest(unittest.TestCase):
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
- random_static = ''.join(random.choice(string.ascii_letters)
- for _ in range(50))
+ random_static = "".join(
+ random.choice(string.ascii_letters) for _ in range(50)
+ )
# Assert that the service is not yet available
- self.assertIsNone(context.get_service_reference(IEchoService),
- "Service is already registered")
+ self.assertIsNone(
+ context.get_service_reference(IEchoService),
+ "Service is already registered",
+ )
# Instantiate the components
consumer_single = self.ipopo.instantiate(
- module.FACTORY_REQUIRES_VAR_FILTER, NAME_A,
- {"static": random_static})
+ module.FACTORY_REQUIRES_VAR_FILTER,
+ NAME_A,
+ {"static": random_static},
+ )
consumer_multi = self.ipopo.instantiate(
- module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, NAME_B,
- {"static": random_static})
+ module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE,
+ NAME_B,
+ {"static": random_static},
+ )
consumers = (consumer_single, consumer_multi)
# Force the "answer" property to an int
@@ -315,15 +386,22 @@ class RequiresVarFilterTest(unittest.TestCase):
# Instantiate a service, matching the filter
svc1 = object()
context.register_service(
- IEchoService, svc1,
- {"s": random_static, "a": consumer_single.answer})
+ IEchoService,
+ svc1,
+ {"s": random_static, "a": consumer_single.answer},
+ )
# Component must be valid
for consumer in consumers:
self.assertListEqual(
- [IPopoEvent.INSTANTIATED, IPopoEvent.BOUND,
- IPopoEvent.VALIDATED], consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ [
+ IPopoEvent.INSTANTIATED,
+ IPopoEvent.BOUND,
+ IPopoEvent.VALIDATED,
+ ],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
# Set an invalid filter
@@ -332,8 +410,10 @@ class RequiresVarFilterTest(unittest.TestCase):
# The consumer must have been validated
self.assertListEqual(
- [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND], consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ [IPopoEvent.INVALIDATED, IPopoEvent.UNBOUND],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
self.assertIs(consumer.service, None, "A service is injected")
@@ -346,12 +426,15 @@ class RequiresVarFilterTest(unittest.TestCase):
# Instantiate a service, matching the filter
svc = object()
reg = context.register_service(
- IEchoService, svc, {"s": random_static, "a": invalid})
+ IEchoService, svc, {"s": random_static, "a": invalid}
+ )
# Nothing should happen
self.assertListEqual(
- [], consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ [],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
reg.unregister()
@@ -365,20 +448,27 @@ class RequiresVarFilterTest(unittest.TestCase):
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
- random_static = ''.join(random.choice(string.ascii_letters)
- for _ in range(50))
+ random_static = "".join(
+ random.choice(string.ascii_letters) for _ in range(50)
+ )
# Assert that the service is not yet available
- self.assertIsNone(context.get_service_reference(IEchoService),
- "Service is already registered")
+ self.assertIsNone(
+ context.get_service_reference(IEchoService),
+ "Service is already registered",
+ )
# Instantiate the components
consumer_single = self.ipopo.instantiate(
- module.FACTORY_REQUIRES_VAR_FILTER, NAME_A,
- {"static": random_static})
+ module.FACTORY_REQUIRES_VAR_FILTER,
+ NAME_A,
+ {"static": random_static},
+ )
consumer_multi = self.ipopo.instantiate(
- module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE, NAME_B,
- {"static": random_static})
+ module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE,
+ NAME_B,
+ {"static": random_static},
+ )
consumers = (consumer_single, consumer_multi)
# Force the "answer" property to an int
@@ -388,15 +478,22 @@ class RequiresVarFilterTest(unittest.TestCase):
# Instantiate a service, matching the filter
svc1 = object()
context.register_service(
- IEchoService, svc1,
- {"s": random_static, "a": consumer_single.answer})
+ IEchoService,
+ svc1,
+ {"s": random_static, "a": consumer_single.answer},
+ )
# Component must be valid
for consumer in consumers:
self.assertListEqual(
- [IPopoEvent.INSTANTIATED, IPopoEvent.BOUND,
- IPopoEvent.VALIDATED], consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ [
+ IPopoEvent.INSTANTIATED,
+ IPopoEvent.BOUND,
+ IPopoEvent.VALIDATED,
+ ],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
# Set the filter with a similar value (same once formatted)
@@ -406,13 +503,16 @@ class RequiresVarFilterTest(unittest.TestCase):
# The consumer should not be notified
for consumer in consumers:
self.assertListEqual(
- [], consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ [],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
consumer.reset()
self.assertIs(consumer_single.service, svc1, "Wrong service injected")
- self.assertListEqual(consumer_multi.service, [svc1],
- "Wrong service injected")
+ self.assertListEqual(
+ consumer_multi.service, [svc1], "Wrong service injected"
+ )
def test_incomplete_properties(self):
"""
@@ -423,21 +523,26 @@ class RequiresVarFilterTest(unittest.TestCase):
assert isinstance(context, BundleContext)
answer = 42
- random_static = ''.join(random.choice(string.ascii_letters)
- for _ in range(50))
+ random_static = "".join(
+ random.choice(string.ascii_letters) for _ in range(50)
+ )
# Assert that the service is not yet available
- self.assertIsNone(context.get_service_reference(IEchoService),
- "Service is already registered")
+ self.assertIsNone(
+ context.get_service_reference(IEchoService),
+ "Service is already registered",
+ )
# Instantiate a service, matching the filter
svc1 = object()
context.register_service(
- IEchoService, svc1, {"s": random_static, "a": answer})
+ IEchoService, svc1, {"s": random_static, "a": answer}
+ )
for name, factory in (
- (NAME_A, module.FACTORY_REQUIRES_VAR_FILTER),
- (NAME_B, module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE)):
+ (NAME_A, module.FACTORY_REQUIRES_VAR_FILTER),
+ (NAME_B, module.FACTORY_REQUIRES_VAR_FILTER_AGGREGATE),
+ ):
# Instantiate the component, without the static property
consumer = self.ipopo.instantiate(factory, name, {})
@@ -446,14 +551,59 @@ class RequiresVarFilterTest(unittest.TestCase):
# Component must be instantiated, but not valid
self.assertListEqual(
- [IPopoEvent.INSTANTIATED], consumer.states,
- "Invalid component states: {0}".format(consumer.states))
+ [IPopoEvent.INSTANTIATED],
+ consumer.states,
+ "Invalid component states: {0}".format(consumer.states),
+ )
self.assertIs(consumer.service, None, "Service injected")
+ def test_late_binding(self):
+ """
+ Tests late binding, see issue #119:
+ https://github.com/tcalmant/ipopo/issues/119
+ """
+ install_bundle(self.framework, "tests.ipopo.issue_119_bundle")
+ context = self.framework.get_bundle_context()
+ assert isinstance(context, BundleContext)
+
+ self.ipopo.instantiate("varservice-factory", "varservice-instance")
+ self.ipopo.instantiate("provider-factory", "provider-instance-1", {"prop": "svc1"})
+ self.ipopo.instantiate("provider-factory", "provider-instance-2", {"prop": "svc2"})
+
+ svc1 = self.ipopo.get_instance("provider-instance-1")
+ svc2 = self.ipopo.get_instance("provider-instance-2")
+ consumer = self.ipopo.get_instance("varservice-instance")
+
+ self.assertEqual(self.ipopo.get_instance_details("provider-instance-1")["state"], StoredInstance.VALID)
+ self.assertEqual(self.ipopo.get_instance_details("provider-instance-2")["state"], StoredInstance.VALID)
+ self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.INVALID)
+
+ consumer.search = "svc1"
+ self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.VALID)
+ self.assertEqual(consumer.depends, svc1)
+
+ consumer.search = "svc2"
+ self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.VALID)
+ self.assertEqual(consumer.depends, svc2)
+
+ consumer.search = "non-existent"
+ self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.INVALID)
+ self.assertIsNone(consumer.depends)
+
+ consumer.search = "svc1"
+ self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.VALID)
+ self.assertEqual(consumer.depends, svc1)
+
+ consumer.search = None
+ self.assertEqual(self.ipopo.get_instance_details("varservice-instance")["state"], StoredInstance.INVALID)
+ self.assertIsNone(consumer.depends)
+
+
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set logging level
import logging
+
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
RequiresVarFilter doesn't update bindings
When updating a property used in the filter of a RequiresVarFilter decorator on an invalid component instance, the bindings and the component lifecycle are not updated.
Current workaround: update `pelix.ipopo.handlers.requiresvarfilter::_VariableFilterMixIn::_reset` to call `self._ipopo_instance.update_bindings()` after `self.start()` and `self._ipopo_instance.check_lifecycle()` after the bindings for-loop.
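A sketch of that workaround in context, simplified from the handler in `pelix/ipopo/handlers/requiresvarfilter.py`: the shipped patch above adds the `update_bindings()` call, while the trailing `check_lifecycle()` is the extra step this issue suggests.

```
class _VariableFilterMixIn:
    # Sketch of _reset() after the workaround; attribute and method names
    # follow the handler shown in the patch above.
    def _reset(self):
        # Re-render the filter from the current property values
        self.stop()
        self.start()

        # Force a bindings update so services registered while the component
        # was invalid are re-checked against the new filter
        self._ipopo_instance.update_bindings()

        for svc_ref in self.get_bindings():
            # Unbind services that no longer match the new filter
            if not self.requirement.filter.matches(svc_ref.get_properties()):
                self.on_service_departure(svc_ref)

        # Re-evaluate component validity now that the bindings changed
        self._ipopo_instance.check_lifecycle()
```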
|
0.0
|
1d0add361ca219da8fdf72bb9ba8cb0ade01ad2f
|
[
"tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_late_binding"
] |
[
"tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_immediate_rebind",
"tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_incomplete_properties",
"tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_invalid_filter",
"tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_no_change",
"tests/ipopo/test_requires_varfilter.py::RequiresVarFilterTest::test_requires_var_filter"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-10-28 18:53:24+00:00
|
apache-2.0
| 5,837 |
|
tech-teach__marshmallowjson-3
|
diff --git a/.travis.yml b/.travis.yml
index 4f0901a..71234d5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,26 +1,29 @@
-# Config file for automatic testing at travis-ci.org
-# This file will be regenerated if you run travis_pypi_setup.py
-
-language: python
-python:
- - 3.6
- - 3.5
-
-# command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
-install: pip install -U tox-travis
-
-# command to run tests, e.g. python setup.py test
-script: tox
-
-# After you create the Github repo and add it to Travis, run the
-# travis_pypi_setup.py script to finish PyPI deployment setup
+# This file was autogenerated and will overwrite each time you run travis_pypi_setup.py
deploy:
+ password:
+ secure: !!binary |
+ VnZ0MytIcHhSRkxWdGFqYnd6WGo5eWZxN0E0UE9kRm5QNTFZR3FQcXQ3MFEvakp2OXhkMnZwbVcr
+ MElOVVNjZFVqdUljUUpCa2NrbXdPZUVzL1FuNThBRVhJRjhCMVNSL0FGTFV1bW9DTEU0YmgzcHgw
+ d2VSdk8rZXpkUFgxdmx0TFF4bWdLR2xHVG4yN3RlSEtTdVR6eVVWTkNzSGwrKzB5a0VSeFBPODBC
+ NDl4S0EvbjVPQk9YSkFYZzNXODUvaDRwUTQ0Z2NhSHg3bTdZSTBGSytRZGJTZHRWTjZrUEV1R3hJ
+ MlNSdkhQdVdmWjhhY0Q5eXJSVmtVRk5iUldzNTZUeEI3TUp0ajkxdEJTdGZLdTM0Z2ZITGNXdTNp
+ M1dQUVl3UmlZUFNFUjZvMnVZZzFsR1k3ZmJhM01ZZUVGdnRYZER3YndUcEh6T1kyYnlSd1ptSlhr
+ N3VCOUw1dlNLa1hyd0VOcXgyaU12Wm5jMVhNbkRqcTNnOHYvUk5XSWVoSnFoMWN4ZGtkNHhPREty
+ enpJMUNZbGc5b0FaL1JSYVVvR3ZuNkRtYVN5aTU2U2NZZTJWaUlld1E3Zm13eEpKQVBmRzBMY2RO
+ QUkrU0tCUmVqenl6bHlBSndwS20wRU1kOUx4dlRoQTVydzlwS0pVSkYzN20xWHJGUU9OK29nOUFU
+ YzZKK3puSUtQRmVXTWlHUU5xL1RRZUI5YTZPcnRZQmxZWjY2ZldhelppOEVXL01PSUlXQnNDU0Zm
+ VG9VSXNDSDAxTFNKczc0MzRjdVJORWZON1FhOVVDcnh0MGVNcnNDTVRGMWxyV28vbW9jODU0TXlZ
+ bmV2UlFtOHVxa0k4Q2JaTStvM0pDV2lCQXV6MStVZjdaR1R2OThlcFVvYkN1WGhZY00xTU1nd1E9
provider: pypi
distributions: sdist bdist_wheel
user: odarbelaeze
- password:
- secure: PLEASE_REPLACE_ME
- on:
- tags: true
- repo: odarbelaeze/marshmallowjson
+ true:
python: 3.6
+ repo: odarbelaeze/marshmallowjson
+ tags: true
+install: pip install -U tox-travis
+language: python
+python:
+- 3.6
+- 3.5
+script: tox
diff --git a/marshmallowjson/cli.py b/marshmallowjson/cli.py
index 8cdfbe7..b19d723 100644
--- a/marshmallowjson/cli.py
+++ b/marshmallowjson/cli.py
@@ -1,14 +1,41 @@
"""Console script for marshmallowjson."""
import click
+import collections
+import json
+import sys
+
+
+def fail(kind, type_, name):
+ click.echo(click.style(
+ '{kind} is not a known type in {type_}.{name}'.format(
+ kind=kind,
+ type_=type_,
+ name=name,
+ ),
+ fg='red'
+ ))
+ sys.exit(1)
@click.command()
-def main(args=None):
- """Console script for marshmallowjson."""
- click.echo("Replace this message by putting your code into "
- "marshmallowjson.cli.main")
- click.echo("See click documentation at http://click.pocoo.org/")
[email protected]('definition', type=click.File('r'))
+def main(definition):
+ """Validate an schema for marshmallow json"""
+ known = set('string boolean uuid number integer decimal'.split())
+ definitions = json.load(definition, object_pairs_hook=collections.OrderedDict)
+ for type_, schema in definitions.items():
+ for name, field in schema.items():
+ kind = field['kind']
+ if kind == 'list':
+ items = field['items']
+ if items not in known:
+ fail(items, type_, name)
+ continue
+ if kind not in known:
+ fail(kind, type_, name)
+ known.add(type_)
+ click.echo(click.style('All clear', fg='green'))
if __name__ == "__main__":
|
tech-teach/marshmallowjson
|
b8a2e3edf36dc7c65b73ed108371e1b2743a4b8e
|
diff --git a/tests/data/basic.json b/tests/data/basic.json
new file mode 100644
index 0000000..93aaa83
--- /dev/null
+++ b/tests/data/basic.json
@@ -0,0 +1,20 @@
+{
+ "StringType": {
+ "field": {
+ "kind": "string",
+ "required": false
+ }
+ },
+ "NumberType": {
+ "field": {
+ "kind": "number",
+ "required": false
+ }
+ },
+ "BooleanType": {
+ "field": {
+ "kind": "boolean",
+ "required": false
+ }
+ }
+}
diff --git a/tests/data/list.json b/tests/data/list.json
new file mode 100644
index 0000000..a08f382
--- /dev/null
+++ b/tests/data/list.json
@@ -0,0 +1,15 @@
+{
+ "StringType": {
+ "field": {
+ "kind": "string",
+ "required": false
+ }
+ },
+ "ListOfString": {
+ "field": {
+ "kind": "list",
+ "items": "StringType",
+ "required": false
+ }
+ }
+}
diff --git a/tests/data/unknown.json b/tests/data/unknown.json
new file mode 100644
index 0000000..7bd8f98
--- /dev/null
+++ b/tests/data/unknown.json
@@ -0,0 +1,9 @@
+{
+ "Type": {
+ "field": {
+ "kind": "Unknown",
+ "required": false,
+ "doc": "Unknow is nowhere near the type definitions, that's an error"
+ }
+ }
+}
diff --git a/tests/test_marshmallowjson.py b/tests/test_marshmallowjson.py
index 8a3236c..62824c1 100644
--- a/tests/test_marshmallowjson.py
+++ b/tests/test_marshmallowjson.py
@@ -1,5 +1,6 @@
"""Tests for `marshmallowjson` package."""
+import os
import pytest
from click.testing import CliRunner
@@ -9,27 +10,45 @@ from marshmallowjson import cli
@pytest.fixture
-def response():
- """Sample pytest fixture.
+def unknown():
+ root = os.path.dirname(__file__)
+ return os.path.join(root, 'data/unknown.json')
- See more at: http://doc.pytest.org/en/latest/fixture.html
- """
- # import requests
- # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
+
[email protected]
+def basic():
+ root = os.path.dirname(__file__)
+ return os.path.join(root, 'data/basic.json')
+
+
[email protected]
+def list_schema():
+ root = os.path.dirname(__file__)
+ return os.path.join(root, 'data/list.json')
+
+
+def test_error_when_using_unknown_type(unknown):
+ runner = CliRunner()
+ result = runner.invoke(cli.main, [unknown])
+ assert result.exit_code == 1, result.output
+ assert 'Unknown is not a known type in Type.field' in result.output
+
+
+def test_all_basic_types_are_allowed(basic):
+ runner = CliRunner()
+ result = runner.invoke(cli.main, [basic])
+ assert result.exit_code == 0, result.output
-def test_content(response):
- """Sample pytest test function with the pytest fixture as an argument."""
- # from bs4 import BeautifulSoup
- # assert 'GitHub' in BeautifulSoup(response.content).title.string
+def test_lists_are_allowed(list_schema):
+ runner = CliRunner()
+ result = runner.invoke(cli.main, [list_schema])
+ assert result.exit_code == 0, result.output
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
- result = runner.invoke(cli.main)
- assert result.exit_code == 0
- assert 'marshmallowjson.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
|
Validate JSON schema dependency order
It is necessary to work with a valid JSON schema, so create a JSON validator for this structure:
```
{
"Identifier": {
"catalog": {
"kind": "str",
"required": true,
"doc": "Que vaina es esto"
},
"entry": {
"kind": "str",
"required": true
},
"uuid": {
"kind": "uuid",
"required": true
}
},
"General": {
"identifier": {
"kind": "object",
"schema": "Identifier",
"required": true
},
"title": {
"kind": "str",
"required": true
},
"keywords": {
"kind": "list",
"items": "str",
"required": false
}
},
"LearningObject": {
"general": {
"kind": "object",
"schema": "General",
"required": false
}
}
}
```
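A minimal sketch of that dependency-order check, close to the `cli.main` added in the patch above: definitions are read in file order, and a field may only reference primitive kinds, `list` items, or types defined earlier in the file. (The `object`/`schema` case from the example would need the same lookup; this sketch collects errors instead of exiting on the first one.)

```
import collections
import json

PRIMITIVES = set("string boolean uuid number integer decimal".split())

def validate(text):
    """Return (kind, type, field) tuples for every unknown reference."""
    definitions = json.loads(text, object_pairs_hook=collections.OrderedDict)
    known = set(PRIMITIVES)
    errors = []
    for type_, schema in definitions.items():
        for name, field in schema.items():
            kind = field["kind"]
            # A list field references another kind through "items".
            ref = field["items"] if kind == "list" else kind
            if ref not in known:
                errors.append((ref, type_, name))
        known.add(type_)  # later definitions may reference this type
    return errors

good = '{"Tag": {"name": {"kind": "string"}}, "Tags": {"all": {"kind": "list", "items": "Tag"}}}'
bad = '{"Type": {"field": {"kind": "Unknown"}}}'
print(validate(good))  # []
print(validate(bad))   # [('Unknown', 'Type', 'field')]
```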
|
0.0
|
b8a2e3edf36dc7c65b73ed108371e1b2743a4b8e
|
[
"tests/test_marshmallowjson.py::test_error_when_using_unknown_type",
"tests/test_marshmallowjson.py::test_all_basic_types_are_allowed",
"tests/test_marshmallowjson.py::test_lists_are_allowed"
] |
[
"tests/test_marshmallowjson.py::test_command_line_interface",
"tests/test_marshmallowjson.py::test_avoid_warning"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-01-28 23:48:38+00:00
|
mit
| 5,838 |
|
tefra__pytuber-20
|
diff --git a/pytuber/cli.py b/pytuber/cli.py
index f432978..2c7d6e6 100644
--- a/pytuber/cli.py
+++ b/pytuber/cli.py
@@ -68,6 +68,8 @@ def add():
"""Add playlist."""
+add.add_command(core.add_from_editor)
+add.add_command(core.add_from_file)
add.add_command(lastfm.add)
diff --git a/pytuber/core/commands/__init__.py b/pytuber/core/commands/__init__.py
index 27fbf7e..888f16e 100644
--- a/pytuber/core/commands/__init__.py
+++ b/pytuber/core/commands/__init__.py
@@ -7,6 +7,7 @@ from pytuber.core.commands.cmd_show import show
from pytuber.core.commands.cmd_autocomplete import autocomplete
from pytuber.core.commands.cmd_clean import clean
from pytuber.core.commands.cmd_quota import quota
+from pytuber.core.commands.cmd_add import add_from_editor, add_from_file
__all__ = [
"setup",
@@ -18,4 +19,6 @@ __all__ = [
"autocomplete",
"clean",
"quota",
+ "add_from_editor",
+ "add_from_file",
]
diff --git a/pytuber/core/commands/cmd_add.py b/pytuber/core/commands/cmd_add.py
new file mode 100644
index 0000000..e0af66b
--- /dev/null
+++ b/pytuber/core/commands/cmd_add.py
@@ -0,0 +1,97 @@
+from typing import List
+
+import click
+from tabulate import tabulate
+
+from pytuber.core.models import (
+ PlaylistManager,
+ PlaylistType,
+ Provider,
+ TrackManager,
+)
+from pytuber.lastfm.commands.cmd_add import option_title
+from pytuber.utils import magenta
+
+
[email protected]("editor")
+@option_title()
+def add_from_editor(title: str) -> None:
+ """Create playlist in a text editor."""
+ marker = (
+ "\n\n# Copy/Paste your track list and hit save!\n"
+ "# One line per track, make sure it doesn't start with a #\n"
+ "# Separate the track artist and title with a single dash `-`\n"
+ )
+ message = click.edit(marker)
+ create_playlist(title, parse_tracklist(message or ""))
+
+
[email protected]("file")
[email protected]("file", type=click.Path(), required=True)
+@option_title()
+def add_from_file(file: str, title: str) -> None:
+ """Import a playlist from a text file."""
+
+ with open(file, "r") as fp:
+ text = fp.read()
+
+ create_playlist(title, parse_tracklist(text or ""))
+
+
+def parse_tracklist(text):
+ tracks: List[tuple] = []
+ for line in text.split("\n"):
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+
+ parts = line.split("-", 1)
+ if len(parts) != 2:
+ continue
+
+ artist, track = list(map(str.strip, parts))
+ if not artist or not track or (artist, track) in tracks:
+ continue
+
+ tracks.append((artist, track))
+
+ return tracks
+
+
+def create_playlist(title, tracks):
+ if not tracks:
+ return click.secho("Tracklist is empty, aborting...")
+
+ click.clear()
+ click.secho(
+ "{}\n\n{}\n".format(
+ tabulate( # type: ignore
+ [
+ (magenta("Title:"), title),
+ (magenta("Tracks:"), len(tracks)),
+ ],
+ tablefmt="plain",
+ colalign=("right", "left"),
+ ),
+ tabulate( # type: ignore
+ [
+ (i + 1, track[0], track[1])
+ for i, track in enumerate(tracks)
+ ],
+ headers=("No", "Artist", "Track Name"),
+ ),
+ )
+ )
+ click.confirm("Are you sure you want to save this playlist?", abort=True)
+ playlist = PlaylistManager.set(
+ dict(
+ type=PlaylistType.EDITOR,
+ provider=Provider.user,
+ title=title.strip(),
+ tracks=[
+ TrackManager.set(dict(artist=artist, name=name)).id
+ for artist, name in tracks
+ ],
+ )
+ )
+ click.secho("Added playlist: {}!".format(playlist.id))
diff --git a/pytuber/core/commands/s b/pytuber/core/commands/s
new file mode 100644
index 0000000..e69de29
diff --git a/pytuber/core/models.py b/pytuber/core/models.py
index 861373d..53f12c0 100644
--- a/pytuber/core/models.py
+++ b/pytuber/core/models.py
@@ -16,6 +16,14 @@ from pytuber.utils import timestamp
class Provider(enum.Enum):
lastfm = "last.fm"
youtube = "youtube"
+ user = "user"
+
+ def __str__(self):
+ return self.value
+
+
+class PlaylistType(enum.Enum):
+ EDITOR = "editor"
def __str__(self):
return self.value
diff --git a/pytuber/lastfm/commands/cmd_add.py b/pytuber/lastfm/commands/cmd_add.py
index 1fd87a3..1f451b1 100644
--- a/pytuber/lastfm/commands/cmd_add.py
+++ b/pytuber/lastfm/commands/cmd_add.py
@@ -16,7 +16,7 @@ from .cmd_fetch import fetch_tracks
@click.group("lastfm")
def add():
- """Last.fm is a music service that learns what you love."""
+ """Create playlists from Last.fm api."""
option_limit = partial(
|
tefra/pytuber
|
ae19a31c38462821ec22cd7376914ddce6a15a4f
|
diff --git a/tests/core/commands/test_cmd_add.py b/tests/core/commands/test_cmd_add.py
new file mode 100644
index 0000000..c1fdd90
--- /dev/null
+++ b/tests/core/commands/test_cmd_add.py
@@ -0,0 +1,104 @@
+from unittest import mock
+
+from pytuber import cli
+from pytuber.core.commands.cmd_add import create_playlist, parse_tracklist
+from pytuber.core.models import PlaylistManager, PlaylistType, Provider
+from tests.utils import CommandTestCase, PlaylistFixture
+
+
+class CommandAddTests(CommandTestCase):
+ @mock.patch("click.edit")
+ @mock.patch("pytuber.core.commands.cmd_add.create_playlist")
+ @mock.patch("pytuber.core.commands.cmd_add.parse_tracklist")
+ def test_add_from_editor(self, parse_tracklist, create_playlist, clk_edit):
+ clk_edit.return_value = "foo"
+ parse_tracklist.return_value = ["a", "b"]
+ self.runner.invoke(
+ cli, ["add", "editor", "--title", "My Cool Playlist"]
+ )
+ parse_tracklist.assert_called_once_with("foo")
+ create_playlist.assert_called_once_with("My Cool Playlist", ["a", "b"])
+
+ @mock.patch("pytuber.core.commands.cmd_add.create_playlist")
+ @mock.patch("pytuber.core.commands.cmd_add.parse_tracklist")
+ def test_add_from_file(self, parse_tracklist, create_playlist):
+ parse_tracklist.return_value = ["a", "b"]
+ with self.runner.isolated_filesystem():
+ with open("hello.txt", "w") as f:
+ f.write("foo")
+
+ self.runner.invoke(
+ cli,
+ ["add", "file", "hello.txt", "--title", "My Cool Playlist"],
+ )
+
+ parse_tracklist.assert_called_once_with("foo")
+ create_playlist.assert_called_once_with(
+ "My Cool Playlist", ["a", "b"]
+ )
+
+
+class CommandAddUtilsTests(CommandTestCase):
+ def test_parse_tracklist(self):
+ text = "\n".join(
+ (
+ "Queen - Bohemian Rhapsody",
+ " Queen - Bohemian Rhapsody",
+ "Queen -I want to break free",
+ "#" " ",
+ "Wrong Format",
+ )
+ )
+ actual = parse_tracklist(text)
+ expected = [
+ ("Queen", "Bohemian Rhapsody"),
+ ("Queen", "I want to break free"),
+ ]
+ self.assertEqual(expected, actual)
+
+ @mock.patch("pytuber.core.commands.cmd_add.magenta")
+ @mock.patch.object(PlaylistManager, "set")
+ @mock.patch("click.confirm")
+ @mock.patch("click.secho")
+ @mock.patch("click.clear")
+ def test_create_playlist(self, clear, secho, confirm, set, magenta):
+ magenta.side_effect = lambda x: x
+ set.return_value = PlaylistFixture.one()
+ tracks = [
+ ("Queen", "Bohemian Rhapsody"),
+ ("Queen", "I want to break free"),
+ ]
+ create_playlist("My Cool Playlist", tracks)
+
+ expected_ouput = (
+ "Title: My Cool Playlist",
+ "Tracks: 2",
+ "",
+ " No Artist Track Name",
+ "---- -------- --------------------",
+ " 1 Queen Bohemian Rhapsody",
+ " 2 Queen I want to break free",
+ )
+
+ self.assertOutput(expected_ouput, secho.call_args_list[0][0][0])
+ self.assertEqual(
+ "Added playlist: id_a!", secho.call_args_list[1][0][0]
+ )
+
+ clear.assert_called_once_with()
+ confirm.assert_called_once_with(
+ "Are you sure you want to save this playlist?", abort=True
+ )
+ set.assert_called_once_with(
+ dict(
+ type=PlaylistType.EDITOR,
+ provider=Provider.user,
+ title="My Cool Playlist",
+ tracks=["55a4d2b", "b045fee"],
+ )
+ )
+
+ @mock.patch("click.secho")
+ def test_create_playlist_empty_tracks(self, secho):
+ create_playlist("foo", [])
+ secho.assert_called_once_with("Tracklist is empty, aborting...")
|
Support raw string format
A file containing tracks one per line and a direct copy/paste in the terminal
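A quick check of the intended behaviour, assuming the `parse_tracklist` helper from the patch above is in scope (the input lines mirror the new test fixture):
```python
text = "\n".join((
    "Queen - Bohemian Rhapsody",
    "  Queen - Bohemian Rhapsody",  # duplicates and surrounding spaces are dropped
    "Queen -I want to break free",  # whitespace around the dash is optional
    "# comment lines and blanks are skipped",
    "Wrong Format",                 # no dash separator, so the line is ignored
))
assert parse_tracklist(text) == [
    ("Queen", "Bohemian Rhapsody"),
    ("Queen", "I want to break free"),
]
```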
|
0.0
|
ae19a31c38462821ec22cd7376914ddce6a15a4f
|
[
"tests/core/commands/test_cmd_add.py::CommandAddTests::test_add_from_editor",
"tests/core/commands/test_cmd_add.py::CommandAddTests::test_add_from_file",
"tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_create_playlist",
"tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_create_playlist_empty_tracks",
"tests/core/commands/test_cmd_add.py::CommandAddUtilsTests::test_parse_tracklist"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-10 17:39:49+00:00
|
mit
| 5,839 |
|
tefra__xsdata-273
|
diff --git a/xsdata/formats/converter.py b/xsdata/formats/converter.py
index b5897fe2..e222c88d 100644
--- a/xsdata/formats/converter.py
+++ b/xsdata/formats/converter.py
@@ -230,12 +230,12 @@ class QNameConverter(Converter):
self, value: QName, ns_map: Optional[Dict] = None, **kwargs: Any
) -> str:
"""
- Convert a QName instance to string either with a namespace prefix if
- context namespaces are provided or as fully qualified with the
- namespace uri.
+ Convert a QName instance to string either with a namespace prefix if a
+ prefix-URI namespaces mapping is provided or to a fully qualified name
+ with the namespace.
examples:
- - QName("http://www.w3.org/2001/XMLSchema", "int") & namespaces -> xs:int
+ - QName("http://www.w3.org/2001/XMLSchema", "int") & ns_map -> xs:int
- QName("foo, "bar") -> {foo}bar
"""
@@ -294,12 +294,12 @@ class LxmlQNameConverter(Converter):
self, value: etree.QName, ns_map: Optional[Dict] = None, **kwargs: Any
) -> str:
"""
- Convert a QName instance to string either with a namespace prefix if
- context namespaces are provided or as fully qualified with the
- namespace uri.
+ Convert a QName instance to string either with a namespace prefix if a
+ prefix-URI namespaces mapping is provided or to a fully qualified name
+ with the namespace.
examples:
- - QName("http://www.w3.org/2001/XMLSchema", "int") & namespaces -> xs:int
+ - QName("http://www.w3.org/2001/XMLSchema", "int") & ns_map -> xs:int
- QName("foo, "bar") -> {foo}bar
"""
@@ -319,17 +319,33 @@ class EnumConverter(Converter):
# Convert string value to the type of the first enum member first, otherwise
# more complex types like QName, Decimals will fail.
- enum_member: Enum = list(data_type)[0]
- real_value = converter.from_string(value, [type(enum_member.value)], **kwargs)
+ member: Enum = list(data_type)[0]
+ value_type = type(member.value)
+
+ # Suppress warnings
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ real_value = converter.from_string(value, [value_type], **kwargs)
+
+ # Raise exception if the real value doesn't match the expected type.
+ if not isinstance(real_value, value_type):
+ raise ConverterError()
+
+ # Attempt #1 use the enum constructor
+ with contextlib.suppress(ValueError):
+ return data_type(real_value)
try:
- try:
- return data_type(real_value)
- except ValueError:
- # enums may be derived from xs:NMTOKENS or xs:list
- # try again after removing excess whitespace.
+ # Attempt #2 the enum might be derived from
+ # xs:NMTOKENS or xs:list removing excess whitespace.
+ if isinstance(real_value, str):
return data_type(" ".join(value.split()))
- except ValueError:
+
+ # Attempt #3 some values are never equal try to match
+ # canonical representations.
+ repr_value = repr(real_value)
+ return next(x for x in data_type if repr(x.value) == repr_value)
+ except (ValueError, StopIteration):
raise ConverterError()
def to_string(self, value: Enum, **kwargs: Any) -> str:
|
tefra/xsdata
|
12fc270c6a63dfe21222f30bb65a5ca317a86ba4
|
diff --git a/tests/formats/test_converter.py b/tests/formats/test_converter.py
index 11661a63..cfc8fc11 100644
--- a/tests/formats/test_converter.py
+++ b/tests/formats/test_converter.py
@@ -242,10 +242,7 @@ class EnumConverterTests(TestCase):
with warnings.catch_warnings(record=True) as w:
convert("a", data_type=Fixture)
- self.assertEqual(
- "Failed to convert value `a` to one of [<class 'float'>]",
- str(w[-1].message),
- )
+ self.assertEqual(0, len(w))
self.assertEqual(Fixture.two_point_one, convert("2.1", data_type=Fixture))
@@ -256,6 +253,16 @@ class EnumConverterTests(TestCase):
convert = self.converter.from_string
self.assertEqual(Fixture.a, convert(" a \na a ", data_type=Fixture))
+ def test_from_string_with_value_never_equal_to_anything(self):
+ class Fixture(Enum):
+ a = Decimal("NaN")
+
+ convert = self.converter.from_string
+ self.assertEqual(Fixture.a, convert("NaN", data_type=Fixture))
+
+ with self.assertRaises(ConverterError):
+ convert("1.0", data_type=Fixture)
+
def test_from_string_raises_exception_on_missing_data_type(self):
with self.assertRaises(ConverterError) as cm:
self.converter.from_string("a")
|
Enum converter Decimal('NaN') != Decimal('NaN')
This interesting behavior is failing the Enum converter
```python
In [1]: from enum import Enum
In [2]: from decimal import Decimal
In [3]: class Value(Enum):
...: VALUE_9_99 = Decimal('9.99')
...: NAN = Decimal('NaN')
...:
In [6]: Value(Decimal('9.99'))
Out[6]: <Value.VALUE_9_99: Decimal('9.99')>
In [7]: Value(Decimal('NaN'))
ValueError: Decimal('NaN') is not a valid Value
```
```python
In [8]: Decimal('NaN') == Decimal('NaN')
Out[8]: False
```
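The patch handles this with the canonical-representation fallback ("Attempt #3" above). A minimal standalone sketch of that idea:
```python
from decimal import Decimal
from enum import Enum

class Value(Enum):
    NAN = Decimal("NaN")

real_value = Decimal("NaN")
# Decimal("NaN") != Decimal("NaN"), so Value(real_value) raises ValueError.
# Matching canonical representations sidesteps the broken equality:
member = next(x for x in Value if repr(x.value) == repr(real_value))
assert member is Value.NAN
```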
|
0.0
|
12fc270c6a63dfe21222f30bb65a5ca317a86ba4
|
[
"tests/formats/test_converter.py::EnumConverterTests::test_from_string",
"tests/formats/test_converter.py::EnumConverterTests::test_from_string_with_value_never_equal_to_anything"
] |
[
"tests/formats/test_converter.py::ConverterAdapterTests::test_from_string",
"tests/formats/test_converter.py::ConverterAdapterTests::test_register_converter",
"tests/formats/test_converter.py::ConverterAdapterTests::test_register_converter_with_lambda",
"tests/formats/test_converter.py::ConverterAdapterTests::test_to_string",
"tests/formats/test_converter.py::StrConverterTests::test_from_string",
"tests/formats/test_converter.py::StrConverterTests::test_to_string",
"tests/formats/test_converter.py::BoolConverterTests::test_from_string",
"tests/formats/test_converter.py::BoolConverterTests::test_to_string",
"tests/formats/test_converter.py::IntConverterTests::test_from_string",
"tests/formats/test_converter.py::IntConverterTests::test_to_string",
"tests/formats/test_converter.py::FloatConverterTests::test_from_string",
"tests/formats/test_converter.py::FloatConverterTests::test_to_string",
"tests/formats/test_converter.py::DecimalConverterTests::test_from_string",
"tests/formats/test_converter.py::DecimalConverterTests::test_to_string",
"tests/formats/test_converter.py::LxmlQNameConverterTests::test_from_string",
"tests/formats/test_converter.py::LxmlQNameConverterTests::test_to_string",
"tests/formats/test_converter.py::QNameConverterTests::test_from_string",
"tests/formats/test_converter.py::QNameConverterTests::test_to_string",
"tests/formats/test_converter.py::EnumConverterTests::test_from_string_raises_exception_on_missing_data_type",
"tests/formats/test_converter.py::EnumConverterTests::test_from_string_with_list_derived_enum",
"tests/formats/test_converter.py::EnumConverterTests::test_to_string",
"tests/formats/test_converter.py::ProxyConverterTests::test_from_string",
"tests/formats/test_converter.py::ProxyConverterTests::test_to_string"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-29 15:58:09+00:00
|
mit
| 5,840 |
|
tefra__xsdata-358
|
diff --git a/docs/xml.rst b/docs/xml.rst
index 47ba1e30..61e62dcd 100644
--- a/docs/xml.rst
+++ b/docs/xml.rst
@@ -167,6 +167,9 @@ context instance between them to save on memory and processing.
* - xml_version
- str
- XML Version number, default: ``1.0``
+ * - xml_declaration
+ - bool
+ - Generate XML declaration, default ``True``
* - pretty_print
- bool
- Enable pretty output, default ``False``
diff --git a/xsdata/formats/dataclass/serializers/config.py b/xsdata/formats/dataclass/serializers/config.py
index e7369247..4a408c4a 100644
--- a/xsdata/formats/dataclass/serializers/config.py
+++ b/xsdata/formats/dataclass/serializers/config.py
@@ -8,6 +8,7 @@ class SerializerConfig:
"""
:param encoding: Text encoding
:param xml_version: XML Version number (1.0|1.1)
+ :param xml_declaration: Generate XML declaration
:param pretty_print: Enable pretty output
:param schema_location: Specify the xsi:schemaLocation attribute value
:param no_namespace_schema_location: Specify the xsi:noNamespaceSchemaLocation
@@ -16,6 +17,7 @@ class SerializerConfig:
encoding: str = field(default="UTF-8")
xml_version: str = field(default="1.0")
+ xml_declaration: bool = field(default=True)
pretty_print: bool = field(default=False)
schema_location: Optional[str] = field(default=None)
no_namespace_schema_location: Optional[str] = field(default=None)
diff --git a/xsdata/formats/dataclass/serializers/mixins.py b/xsdata/formats/dataclass/serializers/mixins.py
index 76b8a597..c4b44989 100644
--- a/xsdata/formats/dataclass/serializers/mixins.py
+++ b/xsdata/formats/dataclass/serializers/mixins.py
@@ -91,8 +91,9 @@ class XmlWriter:
self.handler.endDocument()
def start_document(self):
- self.output.write(f'<?xml version="{self.config.xml_version}"')
- self.output.write(f' encoding="{self.config.encoding}"?>\n')
+ if self.config.xml_declaration:
+ self.output.write(f'<?xml version="{self.config.xml_version}"')
+ self.output.write(f' encoding="{self.config.encoding}"?>\n')
def start_tag(self, qname: str):
"""
|
tefra/xsdata
|
a2c51f5bcdcaf2be620a43c9f80f831da16cefc8
|
diff --git a/tests/formats/dataclass/serializers/writers/test_lxml.py b/tests/formats/dataclass/serializers/writers/test_lxml.py
index d8cb5ff9..2d92f070 100644
--- a/tests/formats/dataclass/serializers/writers/test_lxml.py
+++ b/tests/formats/dataclass/serializers/writers/test_lxml.py
@@ -41,6 +41,14 @@ class LxmlEventWriterTests(TestCase):
self.assertEqual('<?xml version="1.1" encoding="US-ASCII"?>', xml_declaration)
+ def test_declaration_disabled(self):
+ self.serializer.config.xml_declaration = False
+ actual = self.serializer.render(books, {None: "urn:books"})
+ expected = fixtures_dir.joinpath("books/books_default_ns.xml").read_text()
+ xml_declaration, expected = expected.split("\n", 1)
+
+ self.assertEqual(expected, actual)
+
def test_pretty_print_false(self):
self.serializer.config.pretty_print = False
actual = self.serializer.render(books)
diff --git a/tests/formats/dataclass/serializers/writers/test_native.py b/tests/formats/dataclass/serializers/writers/test_native.py
index 30185195..309a9a78 100644
--- a/tests/formats/dataclass/serializers/writers/test_native.py
+++ b/tests/formats/dataclass/serializers/writers/test_native.py
@@ -35,6 +35,14 @@ class XmlEventWriterTests(TestCase):
self.assertEqual('<?xml version="1.1" encoding="US-ASCII"?>', xml_declaration)
+ def test_declaration_disabled(self):
+ self.serializer.config.xml_declaration = False
+ actual = self.serializer.render(books, {None: "urn:books"})
+ expected = fixtures_dir.joinpath("books/books_default_ns.xml").read_text()
+ xml_declaration, expected = expected.split("\n", 1)
+
+ self.assertEqual(expected, actual)
+
def test_pretty_print_false(self):
self.serializer.config.pretty_print = False
actual = self.serializer.render(books)
|
SerializerConfig introduced 2 issues
Hello,
The first issue is that the constructor interface for XmlSerializer changed:
```python
XmlSerializer(encoding=None)
```
no longer works. This is the smaller issue, as I can change my code to support the new library (though it is still a backwards-incompatible change).
The second issue is that the XmlWriter no longer supports encoding=None. This is a blocker, as I have a REST API that will not accept a payload with an encoding declaration.
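With the patch applied, the declaration can be switched off through the config. A minimal sketch (import paths follow the modules touched above; treat the exact constructor usage as an assumption):
```python
from xsdata.formats.dataclass.serializers import XmlSerializer
from xsdata.formats.dataclass.serializers.config import SerializerConfig

config = SerializerConfig(encoding="UTF-8", xml_declaration=False)
serializer = XmlSerializer(config=config)
# serializer.render(obj) now omits the <?xml version="1.0" ...?> line,
# which covers the "payload without an encoding" use case described above.
```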
|
0.0
|
a2c51f5bcdcaf2be620a43c9f80f831da16cefc8
|
[
"tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_declaration_disabled",
"tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_declaration_disabled"
] |
[
"tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_encoding",
"tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_pretty_print_false",
"tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_render",
"tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_render_with_default_namespace_prefix",
"tests/formats/dataclass/serializers/writers/test_lxml.py::LxmlEventWriterTests::test_render_with_provided_namespaces",
"tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_encoding",
"tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_pretty_print_false",
"tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_render",
"tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_render_with_default_namespace_prefix",
"tests/formats/dataclass/serializers/writers/test_native.py::XmlEventWriterTests::test_render_with_provided_namespaces"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-12-17 22:53:17+00:00
|
mit
| 5,841 |
|
tefra__xsdata-364
|
diff --git a/xsdata/formats/dataclass/serializers/xml.py b/xsdata/formats/dataclass/serializers/xml.py
index 487ce89d..7a0e88f7 100644
--- a/xsdata/formats/dataclass/serializers/xml.py
+++ b/xsdata/formats/dataclass/serializers/xml.py
@@ -161,8 +161,8 @@ class XmlSerializer(AbstractSerializer):
def write_tokens(self, value: Any, var: XmlVar, namespace: NoneStr) -> Generator:
"""Produce an events stream for the given tokens list or list of tokens
lists."""
- if value:
- if isinstance(value[0], list):
+ if value or var.nillable:
+ if value and isinstance(value[0], list):
for val in value:
yield from self.write_element(val, var, namespace)
else:
|
tefra/xsdata
|
ff428d68c61f254609465012cc62c49f3b88e575
|
diff --git a/tests/formats/dataclass/serializers/test_xml.py b/tests/formats/dataclass/serializers/test_xml.py
index 10f1a9fe..7f065a9e 100644
--- a/tests/formats/dataclass/serializers/test_xml.py
+++ b/tests/formats/dataclass/serializers/test_xml.py
@@ -167,6 +167,17 @@ class XmlSerializerTests(TestCase):
result = self.serializer.write_value([[1, 2, 3], [4, 5, 6]], var, "xsdata")
self.assertEqual(expected, list(result))
+ var = XmlElement(qname="a", name="a", tokens=True, nillable=True)
+ expected = [
+ (XmlWriterEvent.START, "a"),
+ (XmlWriterEvent.ATTR, QNames.XSI_NIL, "true"),
+ (XmlWriterEvent.DATA, []),
+ (XmlWriterEvent.END, "a"),
+ ]
+
+ result = self.serializer.write_value([], var, "xsdata")
+ self.assertEqual(expected, list(result))
+
def test_write_any_type_with_primitive(self):
var = XmlWildcard(qname="a", name="a")
expected = [(XmlWriterEvent.DATA, "str")]
|
XmlSerializer should render empty nillable tokens lists
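For context, a hedged sketch of what the fix enables; the field metadata follows xsdata's conventions and the exact rendered output may differ:
```python
from dataclasses import dataclass, field
from typing import List

from xsdata.formats.dataclass.serializers import XmlSerializer

@dataclass
class Root:
    a: List[int] = field(
        default_factory=list,
        metadata={"type": "Element", "tokens": True, "nillable": True},
    )

# Before the fix the empty tokens list was skipped entirely; with
# `if value or var.nillable:` it is rendered as an <a xsi:nil="true"/> element.
print(XmlSerializer().render(Root()))
```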
|
0.0
|
ff428d68c61f254609465012cc62c49f3b88e575
|
[
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_tokens"
] |
[
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_next_attribute",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_next_value",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_render_mixed_content",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_generic_object",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_primitive",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_any_type_with_primitive_element",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_when_no_matching_choice_exists",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_derived_dataclass",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_derived_primitive_value",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_generic_object",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_choice_with_raw_value",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_data",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass_can_overwrite_params",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_dataclass_with_no_dataclass",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_any_type_var",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_any_type_var_ignore_xs_string",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_element_with_nillable_true",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_mixed_content",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_object_with_derived_element",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_value_with_list_value",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_value_with_unhandled_xml_var",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type_with_derived_class",
"tests/formats/dataclass/serializers/test_xml.py::XmlSerializerTests::test_write_xsi_type_with_illegal_derived_class"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-12-20 15:52:19+00:00
|
mit
| 5,842 |
|
tehmaze__ansi-34
|
diff --git a/ansi/colour/rgb.py b/ansi/colour/rgb.py
index 13fbc02..6eb1e29 100644
--- a/ansi/colour/rgb.py
+++ b/ansi/colour/rgb.py
@@ -56,7 +56,7 @@ def rgb16(r: int, g: int, b: int) -> str:
return rgb_reduce(r, g, b, 16)
-def rgb256(r: int, g: int, b: int) -> str:
+def rgb256(r: int, g: int, b: int, bg: bool=False) -> str:
"""
Convert an RGB colour to 256 colour ANSI graphics.
@@ -79,4 +79,4 @@ def rgb256(r: int, g: int, b: int) -> str:
colour = sum([16] + [int(6 * float(val) / 256) * mod
for val, mod in ((r, 36), (g, 6), (b, 1))])
- return sequence('m', fields=3)(38, 5, colour)
+ return sequence('m', fields=3)(38 if not bg else 48, 5, colour)
|
tehmaze/ansi
|
f80c14bcee8a9c4b4aecbd88c24ba4818c64db77
|
diff --git a/test_ansi.py b/test_ansi.py
index a12d704..a15a75e 100644
--- a/test_ansi.py
+++ b/test_ansi.py
@@ -40,6 +40,11 @@ def test_rgb() -> None:
msg = (rgb256(0xff, 0x80, 0x00), 'hello world', reset)
assert ''.join(map(str, msg)) == '\x1b[38;5;214mhello world\x1b[0m'
+def test_rgb_bg() -> None:
+ from ansi.colour.rgb import rgb256
+ from ansi.colour.fx import reset
+ msg = (rgb256(0xff, 0x80, 0x00, bg=True), 'hello world', reset)
+ assert ''.join(map(str, msg)) == '\x1b[48;5;214mhello world\x1b[0m'
def test_osc() -> None:
from ansi import osc
|
RGB background color support
hi!
By adding a `bg` switch to `rgb256(r, g, b, bg=True)`, you could return:
```
if bg:
return sequence('m', fields=3)(48, 5, colour)
else:
return sequence('m', fields=3)(38, 5, colour)
```
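which is exactly what the patch implements via a conditional on the first SGR field (38 selects the foreground, 48 the background). Usage, mirroring the new test:
```python
from ansi.colour.rgb import rgb256
from ansi.colour.fx import reset

fg = rgb256(0xff, 0x80, 0x00)           # '\x1b[38;5;214m'
bg = rgb256(0xff, 0x80, 0x00, bg=True)  # '\x1b[48;5;214m'
print("".join(map(str, (bg, "hello world", reset))))
```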
|
0.0
|
f80c14bcee8a9c4b4aecbd88c24ba4818c64db77
|
[
"test_ansi.py::test_rgb_bg"
] |
[
"test_ansi.py::test_import",
"test_ansi.py::test_import_color",
"test_ansi.py::test_fg_bg",
"test_ansi.py::test_sugar",
"test_ansi.py::test_rgb",
"test_ansi.py::test_osc",
"test_ansi.py::test_iterm",
"test_ansi.py::test_add",
"test_ansi.py::test_add_to_string",
"test_ansi.py::test_add_other",
"test_ansi.py::test_empty",
"test_ansi.py::test_erase"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-22 00:42:39+00:00
|
mit
| 5,843 |
|
templateflow__python-client-59
|
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 115902f..f9399f7 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -45,7 +45,7 @@ jobs:
source /tmp/venv/bin/activate
pip install -U pip
pip install -r /tmp/src/templateflow/requirements.txt
- pip install "datalad ~= 0.11.8" doi2bib
+ pip install "datalad ~= 0.11.8" "doi2bib < 0.4"
pip install "setuptools>=42.0" "setuptools_scm[toml] >= 3.4" twine codecov
- run:
diff --git a/setup.cfg b/setup.cfg
index 16b5cc9..1012368 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -49,7 +49,7 @@ exclude =
[options.extras_require]
citations =
- doi2bib
+ doi2bib < 0.4.0
datalad =
datalad ~= 0.12.0
doc =
diff --git a/templateflow/conf/__init__.py b/templateflow/conf/__init__.py
index f4b387b..9d29cf2 100644
--- a/templateflow/conf/__init__.py
+++ b/templateflow/conf/__init__.py
@@ -45,11 +45,19 @@ please set the TEMPLATEFLOW_HOME environment variable.\
def update(local=False, overwrite=True, silent=False):
"""Update an existing DataLad or S3 home."""
if TF_USE_DATALAD and _update_datalad():
- return True
-
- from ._s3 import update as _update_s3
+ success = True
+ else:
+ from ._s3 import update as _update_s3
+ success = _update_s3(TF_HOME, local=local, overwrite=overwrite, silent=silent)
- return _update_s3(TF_HOME, local=local, overwrite=overwrite, silent=silent)
+ # update Layout only if necessary
+ if success and TF_LAYOUT is not None:
+ init_layout()
+ # ensure the api uses the updated layout
+ import importlib
+ from .. import api
+ importlib.reload(api)
+ return success
def setup_home(force=False):
@@ -76,9 +84,12 @@ def _update_datalad():
TF_LAYOUT = None
-try:
+
+
+def init_layout():
from .bids import Layout
+ global TF_LAYOUT
TF_LAYOUT = Layout(
TF_HOME,
validate=False,
@@ -92,5 +103,9 @@ try:
"scripts",
],
)
+
+
+try:
+ init_layout()
except ImportError:
pass
diff --git a/templateflow/conf/_s3.py b/templateflow/conf/_s3.py
index 9e20cbe..4051ce8 100644
--- a/templateflow/conf/_s3.py
+++ b/templateflow/conf/_s3.py
@@ -27,7 +27,7 @@ def _get_skeleton_file():
import requests
try:
- r = requests.get(TF_SKEL_URL(release="master", ext="md5", allow_redirects=True))
+ r = requests.get(TF_SKEL_URL(release="master", ext="md5"), allow_redirects=True)
except requests.exceptions.ConnectionError:
return
@@ -35,7 +35,7 @@ def _get_skeleton_file():
return
if r.content.decode().split()[0] != TF_SKEL_MD5:
- r = requests.get(TF_SKEL_URL(release="master", ext="zip", allow_redirects=True))
+ r = requests.get(TF_SKEL_URL(release="master", ext="zip"), allow_redirects=True)
if r.ok:
from os import close
|
templateflow/python-client
|
1c473dfa23eac7eed8cefd1ce9fad5ad52331a2e
|
diff --git a/templateflow/tests/test_conf.py b/templateflow/tests/test_conf.py
new file mode 100644
index 0000000..c2d95f2
--- /dev/null
+++ b/templateflow/tests/test_conf.py
@@ -0,0 +1,27 @@
+from pathlib import Path
+import pytest
+from .. import conf, api
+
+
[email protected](conf.TF_USE_DATALAD, reason="S3 only")
+def test_update_s3(tmp_path):
+ conf.TF_HOME = tmp_path / 'templateflow'
+ conf.TF_HOME.mkdir(exist_ok=True)
+
+ # replace TF_SKEL_URL with the path of a legacy skeleton
+ _skel_url = conf._s3.TF_SKEL_URL
+ conf._s3.TF_SKEL_URL = (
+ "https://github.com/templateflow/python-client/raw/0.5.0/"
+ "templateflow/conf/templateflow-skel.{ext}".format
+ )
+ # initialize templateflow home, making sure to pull the legacy skeleton
+ conf.update(local=False)
+ # ensure we can grab a file
+ assert Path(api.get('MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask')).exists()
+ # and ensure we can't fetch one that doesn't yet exist
+ assert not api.get('Fischer344', hemi='L', desc='brain', suffix='mask')
+
+ # refresh the skeleton using the most recent skeleton
+ conf._s3.TF_SKEL_URL = _skel_url
+ conf.update(local=True, overwrite=True)
+ assert Path(api.get('Fischer344', hemi='L', desc='brain', suffix='mask')).exists()
|
TemplateFlow should re-index the BIDS layout after update
Currently, when `TEMPLATEFLOW_HOME` has been updated in the same Python session, the index of the archive remains out of date.
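With the fix, a single session can update the archive and immediately query the refreshed index. A sketch of the intended flow, mirroring the new test (the `api.get` arguments are illustrative):
```python
from templateflow import api, conf

conf.update(local=False)  # now re-runs init_layout() and reloads the api module
# templates added by the update are visible without restarting Python:
path = api.get('MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask')
```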
|
0.0
|
1c473dfa23eac7eed8cefd1ce9fad5ad52331a2e
|
[
"templateflow/tests/test_conf.py::test_update_s3"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-17 21:56:30+00:00
|
apache-2.0
| 5,844 |
|
tempoCollaboration__OQuPy-71
|
diff --git a/oqupy/mps_mpo.py b/oqupy/mps_mpo.py
index 29e080c..6a24a85 100644
--- a/oqupy/mps_mpo.py
+++ b/oqupy/mps_mpo.py
@@ -198,14 +198,13 @@ def compute_nn_gate(
nn_gate: NnGate
Nearest neighbor gate.
"""
- # exponentiate and transpose such that
- # axis 0 is the input and axis 1 is the output leg of the propagator.
- propagator = linalg.expm(dt*liouvillian).T
+ # exponentiate the liouvillian to become a propagator
+ propagator = linalg.expm(dt*liouvillian)
# split leg 0 and leg 1 each into left and right.
- propagator.shape = [hs_dim_l**2,
- hs_dim_r**2,
- hs_dim_l**2,
- hs_dim_r**2]
+ propagator.shape = [hs_dim_l**2, # left output
+ hs_dim_r**2, # right output
+ hs_dim_l**2, # left input
+ hs_dim_r**2] # right input
temp = np.swapaxes(propagator, 1, 2)
temp = temp.reshape([hs_dim_l**2 * hs_dim_l**2,
hs_dim_r**2 * hs_dim_r**2])
@@ -217,7 +216,9 @@ def compute_nn_gate(
sqrt_s = np.sqrt(s)
u_sqrt_s = u * sqrt_s
sqrt_s_vh =(sqrt_s * vh.T).T
+ # left tensor with legs: left output, left input, bond
tensor_l = u_sqrt_s.reshape(hs_dim_l**2, hs_dim_l**2, chi)
+ # right tensor with legs: bond, right output, right input
tensor_r = sqrt_s_vh.reshape(chi, hs_dim_r**2, hs_dim_r**2)
return NnGate(site=site, tensors=(tensor_l, tensor_r))
diff --git a/oqupy/operators.py b/oqupy/operators.py
index 2566dcf..8dde09e 100644
--- a/oqupy/operators.py
+++ b/oqupy/operators.py
@@ -197,7 +197,7 @@ def cross_left_right_super(
operator_2_l: ndarray,
operator_2_r: ndarray) -> ndarray:
"""
- Construct anit-commutator of cross term (acting on two Hilbert spaces).
+    Construct map from rho to [(op1l x op2l) rho (op1r x op2r)].
"""
op1l_op1r = np.kron(operator_1_l, operator_1_r.T)
op2l_op2r = np.kron(operator_2_l, operator_2_r.T)
diff --git a/oqupy/system.py b/oqupy/system.py
index a7d11c1..184a68b 100644
--- a/oqupy/system.py
+++ b/oqupy/system.py
@@ -429,7 +429,8 @@ class SystemChain(BaseAPIClass):
self._nn_liouvillians = []
for hs_dim_l, hs_dim_r in zip(self._hs_dims[:-1], self._hs_dims[1:]):
self._nn_liouvillians.append(
- np.zeros((hs_dim_l**4, hs_dim_r**4), dtype=NpDtype))
+ np.zeros((hs_dim_l**2 * hs_dim_r**2, hs_dim_l**2 * hs_dim_r**2),
+ dtype=NpDtype))
super().__init__(name, description)
@@ -496,7 +497,7 @@ class SystemChain(BaseAPIClass):
liouvillian: ndarray
Liouvillian acting on the single site.
"""
- raise NotImplementedError()
+ self._site_liouvillians[site] += np.array(liouvillian, dtype=NpDtype)
def add_site_dissipation(
self,
@@ -525,12 +526,13 @@ class SystemChain(BaseAPIClass):
gamma: float
Optional multiplicative factor :math:`\gamma`.
"""
- op = lindblad_operator
+ op = np.array(lindblad_operator, dtype=NpDtype)
op_dagger = op.conjugate().T
self._site_liouvillians[site] += \
- gamma * (opr.left_right_super(op, op_dagger)
+ gamma * (opr.left_right_super(op, op_dagger) \
- 0.5 * opr.acommutator(np.dot(op_dagger, op)))
+
def add_nn_hamiltonian(
self,
site: int,
@@ -585,7 +587,7 @@ class SystemChain(BaseAPIClass):
liouvillian_l_r: ndarray
Liouvillian acting on sites :math:`n` and :math:`n+1`.
"""
- self._nn_liouvillians[site] += liouvillian_l_r
+ self._nn_liouvillians[site] += np.array(liouvillian_l_r, dtype=NpDtype)
def add_nn_dissipation(
self,
|
tempoCollaboration/OQuPy
|
b3355f4c8a6e7001275e78c287d52f6d25c96e53
|
diff --git a/tests/coverage/pt_tebd_test.py b/tests/coverage/pt_tebd_test.py
index 80e47fc..b2fcc54 100644
--- a/tests/coverage/pt_tebd_test.py
+++ b/tests/coverage/pt_tebd_test.py
@@ -17,11 +17,12 @@ Tests for the time_evovling_mpo.pt_tebd module.
import pytest
+import numpy as np
import oqupy
up_dm = oqupy.operators.spin_dm("z+")
-system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2])
-initial_augmented_mps = oqupy.AugmentedMPS([up_dm, up_dm])
+system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,3])
+initial_augmented_mps = oqupy.AugmentedMPS([up_dm, np.diag([1,0,0])])
pt_tebd_params = oqupy.PtTebdParameters(dt=0.2, order=2, epsrel=1.0e-4)
def test_get_augmented_mps():
@@ -32,8 +33,10 @@ def test_get_augmented_mps():
parameters=pt_tebd_params)
augmented_mps = pt_tebd.get_augmented_mps()
- assert augmented_mps.gammas[1].shape == (1,4,1,1)
+ assert augmented_mps.gammas[0].shape == (1,4,1,1)
+ assert augmented_mps.gammas[1].shape == (1,9,1,1)
- pt_tebd.compute(end_step=1, progress_type='silent')
+ pt_tebd.compute(end_step=2, progress_type='silent')
augmented_mps = pt_tebd.get_augmented_mps()
- assert augmented_mps.gammas[1].shape == (1,4,1,1)
+ assert augmented_mps.gammas[0].shape == (1,4,1,1)
+ assert augmented_mps.gammas[1].shape == (1,9,1,1)
diff --git a/tests/physics/example_H_test.py b/tests/physics/example_H_test.py
new file mode 100644
index 0000000..2688cc7
--- /dev/null
+++ b/tests/physics/example_H_test.py
@@ -0,0 +1,101 @@
+# Copyright 2020 The TEMPO Collaboration
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Tests for the time_evovling_mpo.backends.tensor_network modules.
+"""
+import sys
+sys.path.insert(0,'.')
+
+import pytest
+import numpy as np
+
+import oqupy
+
+# -----------------------------------------------------------------------------
+# -- Test F: Test Lindblad dissipation for PT-TEBD ---------------------------
+
+# --- Parameters --------------------------------------------------------------
+
+# -- time steps --
+dt = 0.1
+num_steps = 10
+
+# -- bath --
+alpha = 0.3
+omega_cutoff = 3.0
+temperature = 0.8
+pt_dkmax = 10
+pt_epsrel = 1.0e-6
+
+# -- chain --
+N = 5
+Omega = 1.0
+eta = 0.3
+Delta = 1.2
+h = np.array(
+ [[1.0, 0.0, 0.0],
+ [2.0, 0.0, 0.0],
+ [3.0, 0.0, 0.0],
+ [4.0, 0.0, 0.0],
+ [5.0, 0.0, 0.0]]) * np.pi / 10
+J = np.array([[Delta, 1.0+eta, 1.0-eta]]*(N-1))
+up_dm = oqupy.operators.spin_dm("z+")
+down_dm = oqupy.operators.spin_dm("z-")
+tebd_order = 2
+tebd_epsrel = 1.0e-7
+
+
+def test_pt_tebd_site_dissipation_H1():
+ # -- initial state --
+ initial_augmented_mps = oqupy.AugmentedMPS([up_dm, down_dm, down_dm])
+
+ # -- add single site dissipation --
+ system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2,2])
+ # lowering operator on site 0:
+ system_chain.add_site_dissipation(0,[[0,0],[1,0]])
+ # identity cross raising operator on sites 1 and 2:
+ system_chain.add_nn_dissipation(1,np.identity(2),[[0,1],[0,0]])
+
+ # -- PT-TEBD parameters --
+ pt_tebd_params = oqupy.PtTebdParameters(
+ dt=dt,
+ order=tebd_order,
+ epsrel=tebd_epsrel)
+
+ num_steps = int(1.0/pt_tebd_params.dt)
+
+ pt_tebd = oqupy.PtTebd(
+ initial_augmented_mps=initial_augmented_mps,
+ system_chain=system_chain,
+ process_tensors=[None]*3,
+ parameters=pt_tebd_params,
+ dynamics_sites=[0,1,2],
+ chain_control=None)
+
+ r = pt_tebd.compute(num_steps, progress_type="silent")
+
+ np.testing.assert_almost_equal(
+ r['dynamics'][0].states[-1],
+ [[np.exp(-1),0],[0,1-np.exp(-1)]],
+ decimal=4)
+ np.testing.assert_almost_equal(
+ r['dynamics'][1].states[-1],
+ [[0,0],[0,1]],
+ decimal=4)
+ np.testing.assert_almost_equal(
+ r['dynamics'][2].states[-1],
+ [[1-np.exp(-1),0],[0,np.exp(-1)]],
+ decimal=4)
+
+# -----------------------------------------------------------------------------
|
Bug in SystemChain.add_site_dissipation()
Adding a Markovian Lindblad dissipator to a system chain seems to go wrong, as one can see from the decay of the norm in the following example with dissipation on the first site of a two-site chain (without any coherent evolution):
```python3
import oqupy
import numpy as np
import matplotlib.pyplot as plt
sigma_z = oqupy.operators.sigma("z")
sigma_minus = oqupy.operators.sigma("-")
up_dm = oqupy.operators.spin_dm("z+")
down_dm = oqupy.operators.spin_dm("z-")
initial_augmented_mps = oqupy.AugmentedMPS([up_dm, down_dm])
system_chain = oqupy.SystemChain(hilbert_space_dimensions=[2,2])
system_chain.add_site_dissipation(0, sigma_minus, gamma=0.2)
pt_tebd_params = oqupy.PtTebdParameters(
dt=0.1,
order=2,
epsrel=1.0e-6)
pt_tebd = oqupy.PtTebd(
initial_augmented_mps=initial_augmented_mps,
system_chain=system_chain,
process_tensors=[None, None, None, None, None],
parameters=pt_tebd_params,
dynamics_sites=[0, 1],
chain_control=None)
num_steps = 20
results = pt_tebd.compute(num_steps, progress_type="bar")
plt.plot(results['norm'].real)
```
The norm drops below 0.7 in 20 time steps, which seems to be a real bug and not just a numerical error.
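For reference, the expected physics is easy to verify outside OQuPy. A minimal NumPy sketch of the same single-site dissipator as a superoperator on the vectorised density matrix, using row-major vectorisation so that vec(A rho B) = kron(A, B.T) vec(rho):
```python
import numpy as np
from scipy import linalg

gamma = 0.2
op = np.array([[0.0, 0.0], [1.0, 0.0]])   # sigma_minus
op_dag = op.conj().T
iden = np.eye(2)

# gamma * (L rho L^dag - 1/2 {L^dag L, rho}) as a matrix acting on vec(rho)
liouvillian = gamma * (
    np.kron(op, op_dag.T)
    - 0.5 * (np.kron(op_dag @ op, iden) + np.kron(iden, (op_dag @ op).T))
)

rho = np.array([[1.0, 0.0], [0.0, 0.0]])  # spin up, as in the example
rho_t = (linalg.expm(2.0 * liouvillian) @ rho.reshape(-1)).reshape(2, 2)
print(np.trace(rho_t))  # stays 1.0: a correct implementation preserves the norm
```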
|
0.0
|
b3355f4c8a6e7001275e78c287d52f6d25c96e53
|
[
"tests/coverage/pt_tebd_test.py::test_get_augmented_mps",
"tests/physics/example_H_test.py::test_pt_tebd_site_dissipation_H1"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-07 14:28:26+00:00
|
apache-2.0
| 5,845 |
|
tempoCollaboration__OQuPy-74
|
diff --git a/.gitignore b/.gitignore
index 73b69be..9057a01 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
# TEMPO file formats
*.tempoDynamics
*.processTensor
+*.hdf5
# local development
local_dev/
diff --git a/oqupy/process_tensor.py b/oqupy/process_tensor.py
index b80569f..d37f05e 100644
--- a/oqupy/process_tensor.py
+++ b/oqupy/process_tensor.py
@@ -533,10 +533,10 @@ class FileProcessTensor(BaseProcessTensor):
# transforms
transform_in = np.array(self._f["transform_in"])
- if transform_in == 0.0:
+ if np.allclose(transform_in, np.array([0.0])):
transform_in = None
transform_out = np.array(self._f["transform_out"])
- if transform_out == 0.0:
+ if np.allclose(transform_out, np.array([0.0])):
transform_out = None
# initial tensor and mpo/cap/lam tensors
|
tempoCollaboration/OQuPy
|
be1c8bc45db3411aaebc213c2b2f52cb8d52e55f
|
diff --git a/tests/coverage/process_tensor_test.py b/tests/coverage/process_tensor_test.py
index 32879e5..89acbef 100644
--- a/tests/coverage/process_tensor_test.py
+++ b/tests/coverage/process_tensor_test.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The TEMPO Collaboration
+# Copyright 2022 The oqupy Collaboration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,43 +17,57 @@ Tests for the time_evovling_mpo.process_tensor module.
import pytest
-import oqupy as tempo
+import oqupy
-
-TEMP_FILE = "tests/data/temp.processTensor"
+TEMP_FILE_1 = "./tests/data/temp1.hdf5"
+TEMP_FILE_2 = "./tests/data/temp2.hdf5"
# -- prepare a process tensor -------------------------------------------------
-system = tempo.System(tempo.operators.sigma("x"))
-initial_state = tempo.operators.spin_dm("z+")
-correlations = tempo.PowerLawSD(
+system = oqupy.System(oqupy.operators.sigma("x"))
+initial_state = oqupy.operators.spin_dm("z+")
+correlations = oqupy.PowerLawSD(
alpha=0.3,
zeta=1.0,
cutoff=5.0,
cutoff_type="exponential",
temperature=0.2,
name="ohmic")
-bath = tempo.Bath(
- 0.5*tempo.operators.sigma("z"),
+bath1 = oqupy.Bath(
+ 0.5*oqupy.operators.sigma("z"),
+ correlations,
+ name="phonon bath")
+bath2 = oqupy.Bath(
+ 0.5*oqupy.operators.sigma("x"),
correlations,
name="phonon bath")
-tempo_params = tempo.TempoParameters(
+tempo_params = oqupy.TempoParameters(
dt=0.1,
dkmax=5,
epsrel=10**(-5))
-pt = tempo.pt_tempo_compute(
- bath,
+pt1 = oqupy.pt_tempo_compute(
+ bath1,
+ start_time=0.0,
+ end_time=0.3,
+ parameters=tempo_params)
+pt2 = oqupy.pt_tempo_compute(
+ bath2,
start_time=0.0,
- end_time=1.0,
+ end_time=0.3,
parameters=tempo_params)
-pt.export(TEMP_FILE, overwrite=True)
-del pt
+pt1.export(TEMP_FILE_1, overwrite=True)
+pt2.export(TEMP_FILE_2, overwrite=True)
+del pt1
+del pt2
def test_process_tensor():
- pt = tempo.import_process_tensor(TEMP_FILE, process_tensor_type="simple")
- str(pt)
- pt.get_bond_dimensions()
+ pt1 = oqupy.import_process_tensor(TEMP_FILE_1, process_tensor_type="simple")
+ str(pt1)
+ pt1.get_bond_dimensions()
with pytest.raises(OSError):
- pt.export(TEMP_FILE)
+ pt1.export(TEMP_FILE_1)
+ pt2 = oqupy.import_process_tensor(TEMP_FILE_2, process_tensor_type="file")
+ str(pt2)
+ pt2.get_bond_dimensions()
|
Bug when loading a process tensor from file
I've found a bug when loading a process tensor with non-diagonal coupling from a file.
Here is a (minimal) failing example:
```python
import oqupy
TEMP_FILE = "./temp-process-tensor.hdf5"
system = oqupy.System(oqupy.operators.sigma("x"))
initial_state = oqupy.operators.spin_dm("z+")
correlations = oqupy.PowerLawSD(
alpha=0.3,
zeta=1.0,
cutoff=5.0,
cutoff_type="exponential",
temperature=0.2,
name="ohmic")bath = oqupy.Bath(
0.5*oqupy.operators.sigma("x"),
correlations)
tempo_params = oqupy.TempoParameters(
dt=0.1,
dkmax=5,
epsrel=10**(-5))
pt = oqupy.pt_tempo_compute(
bath,
start_time=0.0,
end_time=0.3,
process_tensor_file=TEMP_FILE,
overwrite=True,
parameters=tempo_params)
del pt
pt = oqupy.import_process_tensor(TEMP_FILE, process_tensor_type="file")
```
This is the output:
```
--> PT-TEMPO computation:
100.0% 2 of 2 [########################################] 00:00:00
Elapsed time: 0.0s
Traceback (most recent call last):
File "./examples/fail.py", line 33, in <module>
pt = oqupy.import_process_tensor(TEMP_FILE, process_tensor_type="file")
File "./oqupy/process_tensor.py", line 729, in import_process_tensor
pt_file = FileProcessTensor(mode="read", filename=filename)
File "./oqupy/process_tensor.py", line 457, in __init__
dictionary = self._read_file(filename)
File "./oqupy/process_tensor.py", line 536, in _read_file
if transform_in == 0.0:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
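The root cause is the classic element-wise comparison pitfall. A minimal sketch of the failing check and the patched one:
```python
import numpy as np

transform_in = np.array([[0.0, 1.0], [1.0, 0.0]])  # a non-diagonal coupling

# `transform_in == 0.0` is an element-wise boolean array, so using it in an
# `if` raises "The truth value of an array ... is ambiguous":
#     if transform_in == 0.0: ...
# Comparing against the stored scalar sentinel with np.allclose is unambiguous:
if np.allclose(transform_in, np.array([0.0])):
    transform_in = None
```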
|
0.0
|
be1c8bc45db3411aaebc213c2b2f52cb8d52e55f
|
[
"tests/coverage/process_tensor_test.py::test_process_tensor"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-07 08:09:54+00:00
|
apache-2.0
| 5,846 |
|
tenable__pyTenable-664
|
diff --git a/tenable/io/v3/base/iterators/explore_iterator.py b/tenable/io/v3/base/iterators/explore_iterator.py
index 737e03d..20c5dc2 100644
--- a/tenable/io/v3/base/iterators/explore_iterator.py
+++ b/tenable/io/v3/base/iterators/explore_iterator.py
@@ -69,7 +69,9 @@ class SearchIterator(ExploreIterator):
Process the API Response
'''
body = response.json()
- pagination = body.get('pagination', {})
+ # Pagination value can be null in JSON response, we need to make sure
+ # a dict is returned
+ pagination = body.get('pagination') or {}
self.page = body[self._resource]
self.total = pagination.get('total')
self._next_token = pagination.get('next')
|
tenable/pyTenable
|
08ce435d75dfa953931582ed2806ca289b7c5fe0
|
diff --git a/tests/io/test_search_iterator_v3.py b/tests/io/test_search_iterator_v3.py
index 8c063cd..8b896a5 100644
--- a/tests/io/test_search_iterator_v3.py
+++ b/tests/io/test_search_iterator_v3.py
@@ -2,6 +2,7 @@
Testing the search iterators for V3 endpoints
'''
import pytest
+import json
from tenable.io.v3.base.iterators.explore_iterator import SearchIterator
@@ -21,7 +22,6 @@ ASSET_DATA = [
]
-
@pytest.mark.vcr()
def test_search_iterator_v3(api):
'''
@@ -61,3 +61,22 @@ def test_search_iterator_v3(api):
with pytest.raises(StopIteration):
next(search_iterator)
+
+
+def test_search_iterator_v3_null_pagination(api):
+ '''
+ Test for null pagination in SearchIterator._process_response
+ '''
+ search_iterator = SearchIterator(
+ api=api
+ )
+ class TempJson:
+ def json(self):
+ return json.loads(json.dumps({'findings': [{'id': 'abcdef'}],
+ 'pagination': None
+ })
+ )
+ search_iterator._resource = "findings"
+ search_iterator._process_response(TempJson())
+ assert search_iterator.total == None
+ assert search_iterator._next_token == None
|
Crash when pagination value is null in JSON response
**Describe the bug**
The following program crashed: sometimes the server returns JSON data that is not correctly handled by the `SearchIterator` class in `explore_iterator.py`.
**To Reproduce**
Steps to reproduce the behavior:
I'm calling the API endpoint (`/api/v3/findings/vulnerabilities/webapp/search`) on cloud.tenable.com, using the `tio.v3.explore.findings.search_webapp` function with these arguments:
```json
{
"limit": 200,
"filter": {
"and": [
{
"operator": "eq",
"value": "redacted",
"property": "asset_id"
}
]
},
"fields": [
"asset",
"definition.id",
"definition.vpr.score",
"definition.exploitability_ease"
],
"next": "**redacted**"
}
```
The server responds (sometimes):
```json
{"findings":[**redacted**],"pagination":null}
```
Since the python code is:
```python
def _process_response(self, response: Response) -> None:
'''
Process the API Response
'''
body = response.json()
pagination = body.get('pagination', {})
self.page = body[self._resource]
self.total = pagination.get('total')
self._next_token = pagination.get('next')
```
The key `pagination` actually exists in the JSON response, so the `pagination` variable will be `None` rather than a dict.
The crash happens at line 8 (line 72 in the actual code).
```
Traceback (most recent call last):
***redacted***
File "/redacted/lib/python3.10/site-packages/restfly/iterator.py", line 114, in __next__
return self.next() # noqa: PLE1102
File "/redacted/lib/python3.10/site-packages/restfly/iterator.py", line 140, in next
self._get_page()
File "/redacted/lib/python3.10/site-packages/tenable/io/v3/base/iterators/explore_iterator.py", line 59, in _get_page
self._process_response(resp)
File "/redacted/lib/python3.10/site-packages/tenable/io/v3/base/iterators/explore_iterator.py", line 74, in _process_response
self.total = pagination.get('total')
AttributeError: 'NoneType' object has no attribute 'get'
```
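The fix relies on `dict.get(key) or {}` instead of a default argument. A minimal sketch of the difference:
```python
body = {"findings": [{"id": "abcdef"}], "pagination": None}

body.get("pagination", {})    # -> None: the key exists, so the default is unused
body.get("pagination") or {}  # -> {}: falls back whenever the value is falsy
```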
**Expected behavior**
The code should not crash.
**System Information (please complete the following information):**
- OS: Linux
- Architecture 64bit
|
0.0
|
08ce435d75dfa953931582ed2806ca289b7c5fe0
|
[
"tests/io/test_search_iterator_v3.py::test_search_iterator_v3_null_pagination"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-01-06 15:40:08+00:00
|
mit
| 5,847 |
|
tensorflow__agents-913
|
diff --git a/tf_agents/environments/batched_py_environment.py b/tf_agents/environments/batched_py_environment.py
index 99fbc2b3..b5041374 100644
--- a/tf_agents/environments/batched_py_environment.py
+++ b/tf_agents/environments/batched_py_environment.py
@@ -26,7 +26,7 @@ from __future__ import print_function
from multiprocessing import dummy as mp_threads
from multiprocessing import pool
# pylint: enable=line-too-long
-from typing import Sequence, Optional
+from typing import Any, Optional, Sequence
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
@@ -182,6 +182,21 @@ class BatchedPyEnvironment(py_environment.PyEnvironment):
)
return nest_utils.stack_nested_arrays(time_steps)
+ def seed(self, seed: types.Seed) -> Any:
+ """Seeds the environment."""
+ return self._execute(lambda env: env.seed(seed), self._envs)
+
+ def get_state(self) -> Any:
+ """Returns the `state` of the environment."""
+ return self._execute(lambda env: env.get_state(), self._envs)
+
+ def set_state(self, state: Sequence[Any]) -> None:
+ """Restores the environment to a given `state`."""
+ self._execute(
+ lambda env_state: env_state[0].set_state(env_state[1]),
+ zip(self._envs, state)
+ )
+
def render(self, mode="rgb_array") -> Optional[types.NestedArray]:
if self._num_envs == 1:
img = self._envs[0].render(mode)
|
tensorflow/agents
|
27b851f4daad092345f07cd2525115a8f3ed5224
|
diff --git a/tf_agents/environments/batched_py_environment_test.py b/tf_agents/environments/batched_py_environment_test.py
index 9cdf9637..3fc6e4a4 100644
--- a/tf_agents/environments/batched_py_environment_test.py
+++ b/tf_agents/environments/batched_py_environment_test.py
@@ -38,10 +38,21 @@ class GymWrapperEnvironmentMock(random_py_environment.RandomPyEnvironment):
def __init__(self, *args, **kwargs):
super(GymWrapperEnvironmentMock, self).__init__(*args, **kwargs)
self._info = {}
+ self._state = {'seed': 0}
def get_info(self):
return self._info
+ def seed(self, seed):
+ self._state['seed'] = seed
+ return super(GymWrapperEnvironmentMock, self).seed(seed)
+
+ def get_state(self):
+ return self._state
+
+ def set_state(self, state):
+ self._state = state
+
def _step(self, action):
self._info['last_action'] = action
return super(GymWrapperEnvironmentMock, self)._step(action)
@@ -116,6 +127,32 @@ class BatchedPyEnvironmentTest(tf.test.TestCase, parameterized.TestCase):
self.assertAllEqual(info['last_action'], action)
gym_env.close()
+ @parameterized.parameters(*COMMON_PARAMETERS)
+ def test_seed_gym_env(self, multithreading):
+ num_envs = 5
+ gym_env = self._make_batched_mock_gym_py_environment(
+ multithreading, num_envs=num_envs
+ )
+
+ gym_env.seed(42)
+
+ actual_seeds = [state['seed'] for state in gym_env.get_state()]
+ self.assertEqual(actual_seeds, [42] * num_envs)
+ gym_env.close()
+
+ @parameterized.parameters(*COMMON_PARAMETERS)
+ def test_state_gym_env(self, multithreading):
+ num_envs = 5
+ gym_env = self._make_batched_mock_gym_py_environment(
+ multithreading, num_envs=num_envs
+ )
+ state = [{'value': i * 10} for i in range(num_envs)]
+
+ gym_env.set_state(state)
+
+ self.assertEqual(gym_env.get_state(), state)
+ gym_env.close()
+
@parameterized.parameters(*COMMON_PARAMETERS)
def test_step(self, multithreading):
num_envs = 5
|
PyEnvironment Methods Incompatible with TF
The docstring for `tf_py_environment.__getattr__` indicates that certain PyEnvironment methods might be incompatible with TF.
```python
def __getattr__(self, name: Text) -> Any:
"""Enables access attributes of the wrapped PyEnvironment.
Use with caution since methods of the PyEnvironment can be incompatible
with TF.
Args:
name: Name of the attribute.
Returns:
The attribute.
"""
if name in self.__dict__:
return getattr(self, name)
return getattr(self._env, name)
```
What makes a PyEnvironment method incompatible with TensorFlow?
I ran across this issue when trying to call `.seed` on the `tf_py_environment`. I implemented a `.seed` function for my subclass of `py_environment`, but calling `.seed` on the wrapper doesn't lead to the `.seed` function of the subclass being called. Perhaps this is intentional?
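With the patch, such calls are fanned out to every wrapped environment. A hedged usage sketch (the specs are illustrative, and the wrapped environments must implement the forwarded methods themselves):
```python
import numpy as np
from tf_agents.environments import batched_py_environment, random_py_environment
from tf_agents.specs import array_spec

observation_spec = array_spec.ArraySpec((1,), np.int32)
action_spec = array_spec.BoundedArraySpec((1,), np.int32, minimum=0, maximum=10)
envs = [random_py_environment.RandomPyEnvironment(observation_spec, action_spec)
        for _ in range(3)]

batched = batched_py_environment.BatchedPyEnvironment(envs)
batched.seed(42)  # forwarded to each wrapped environment
# get_state()/set_state() work the same way when the wrapped environments
# implement them (see GymWrapperEnvironmentMock in the test patch above).
```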
|
0.0
|
27b851f4daad092345f07cd2525115a8f3ed5224
|
[
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_seed_gym_env0",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_seed_gym_env1",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_state_gym_env0",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_state_gym_env1"
] |
[
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_close_no_hang_after_init0",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_close_no_hang_after_init1",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_get_info_gym_env0",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_get_info_gym_env1",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_get_specs0",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_get_specs1",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_step0",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_step1",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_unstack_actions",
"tf_agents/environments/batched_py_environment_test.py::BatchedPyEnvironmentTest::test_unstack_nested_actions"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-01-21 14:46:51+00:00
|
apache-2.0
| 5,848 |
|
tensorly__tensorly-170
|
diff --git a/tensorly/backend/numpy_backend.py b/tensorly/backend/numpy_backend.py
index 45aead6..2688ae9 100644
--- a/tensorly/backend/numpy_backend.py
+++ b/tensorly/backend/numpy_backend.py
@@ -53,7 +53,6 @@ class NumpyBackend(Backend):
return np.sum(np.abs(tensor)**order, axis=axis)**(1 / order)
def kr(self, matrices, weights=None, mask=None):
- if mask is None: mask = 1
n_columns = matrices[0].shape[1]
n_factors = len(matrices)
@@ -66,7 +65,8 @@ class NumpyBackend(Backend):
if weights is not None:
matrices = [m if i else m*self.reshape(weights, (1, -1)) for i, m in enumerate(matrices)]
- return np.einsum(operation, *matrices).reshape((-1, n_columns))*mask
+ m = mask.reshape((-1, 1)) if mask is not None else 1
+ return np.einsum(operation, *matrices).reshape((-1, n_columns))*m
@property
def SVD_FUNS(self):
|
tensorly/tensorly
|
b39c65182d24874e154de2d1563d4882086f0641
|
diff --git a/tensorly/decomposition/tests/test_candecomp_parafac.py b/tensorly/decomposition/tests/test_candecomp_parafac.py
index e537743..dcd8c45 100644
--- a/tensorly/decomposition/tests/test_candecomp_parafac.py
+++ b/tensorly/decomposition/tests/test_candecomp_parafac.py
@@ -75,6 +75,21 @@ def test_parafac():
error = T.norm(tensor - rec, 2)/T.norm(tensor)
assert_(error < tol)
+
+def test_masked_parafac():
+ """Test for the masked CANDECOMP-PARAFAC decomposition.
+ This checks that a mask of 1's is identical to the unmasked case.
+ """
+ rng = check_random_state(1234)
+ tensor = T.tensor(rng.random_sample((3, 3, 3)))
+ mask = T.tensor(np.ones((3, 3, 3)))
+
+ mask_fact = parafac(tensor, rank=2, mask=mask)
+ fact = parafac(tensor, rank=2)
+ diff = kruskal_to_tensor(mask_fact) - kruskal_to_tensor(fact)
+ assert_(T.norm(diff) < 0.01, 'norm 2 of reconstruction higher than 0.01')
+
+
def test_non_negative_parafac():
"""Test for non-negative PARAFAC
|
Masked CP decomposition returns error in numpy backend kr
Hi, like milanlanlan in [Handling missing data in decomposition #4](https://github.com/tensorly/tensorly/issues/4#issuecomment-557899752) I am trying to decompose a tensor using the CP decomposition with missing values. Unfortunately, I also receive this error message:
> 5 mask_parafac[score_sparse > 0] = 1
6 print(mask_parafac)
7 rec_score_tensor = parafac(score_sparse, rank=4, mask = mask)
8
9 print(rec_score_tensor)
tensorly/decomposition/candecomp_parafac.py in parafac(tensor, rank, n_iter_max, init, svd, normalize_factors, tol, orthogonalise, random_state, verbose, return_errors, non_negative, mask)
183
184 if mask is not None:
185 tensor = tensor*mask + tl.kruskal_to_tensor((None, factors), mask=1-mask)
186
187 mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)
tensorly/kruskal_tensor.py in kruskal_to_tensor(kruskal_tensor, mask)
186 T.transpose(khatri_rao(factors, skip_matrix=0)))
187 else:
188 full_tensor = T.sum(khatri_rao([factors[0]*weights]+factors[1:], mask=mask), axis=1)
189
190 return fold(full_tensor, 0, shape)
tensorly/tenalg/_khatri_rao.py in khatri_rao(matrices, weights, skip_matrix, reverse, mask)
96 # Note: we do NOT use .reverse() which would reverse matrices even outside this function
97
98 return T.kr(matrices, weights=weights, mask=mask)
tensorly/backend/__init__.py in inner(*args, **kwargs)
158
159 def inner(*args, **kwargs):
160 return _get_backend_method(name)(*args, **kwargs)
161
162 # We don't use `functools.wraps` here because some of the dispatched
tensorly/backend/numpy_backend.py in kr(self, matrices, weights, mask)
67 matrices = [m if i else m*self.reshape(weights, (1, -1)) for i, m in enumerate(matrices)]
68
69 return np.einsum(operation, *matrices).reshape((-1, n_columns))*mask
70 #tensor = np.einsum(operation, *matrices).reshape((-1, n_columns))
71 #return tensor* mask.reshape(tensor.shape)
ValueError: operands could not be broadcast together with shapes (80,4) (5,4,4)
As the number of elements in the two arrays is not equal, it seems a simple reshape alone will not solve the problem.
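The fix in the patch instead reshapes the mask to a column vector (`mask.reshape((-1, 1))`) so it broadcasts row-wise over the Khatri-Rao product. A minimal NumPy sketch of the shape logic:
```python
import numpy as np

a = np.random.rand(4, 2)
b = np.random.rand(5, 2)
# Khatri-Rao (column-wise Kronecker) product: shape (4*5, 2)
kr = np.einsum('ir,jr->ijr', a, b).reshape(-1, 2)

mask = np.random.randint(0, 2, size=20)  # one flag per product row
masked = kr * mask.reshape(-1, 1)        # broadcasts across both columns
assert masked.shape == (20, 2)
```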
|
0.0
|
b39c65182d24874e154de2d1563d4882086f0641
|
[
"tensorly/decomposition/tests/test_candecomp_parafac.py::test_masked_parafac"
] |
[
"tensorly/decomposition/tests/test_candecomp_parafac.py::test_parafac",
"tensorly/decomposition/tests/test_candecomp_parafac.py::test_non_negative_parafac",
"tensorly/decomposition/tests/test_candecomp_parafac.py::test_sample_khatri_rao",
"tensorly/decomposition/tests/test_candecomp_parafac.py::test_randomised_parafac"
] |
{
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-19 01:28:26+00:00
|
bsd-3-clause
| 5,849 |
|
tensorly__tensorly-221
|
diff --git a/tensorly/decomposition/__init__.py b/tensorly/decomposition/__init__.py
index d43985d..9b4f4c0 100644
--- a/tensorly/decomposition/__init__.py
+++ b/tensorly/decomposition/__init__.py
@@ -8,7 +8,7 @@ from ._nn_cp import non_negative_parafac
from ._tucker import tucker, partial_tucker, non_negative_tucker, Tucker
from .robust_decomposition import robust_pca
from ._tt import TensorTrain, tensor_train, tensor_train_matrix
-from .parafac2 import parafac2, Parafac2
+from ._parafac2 import parafac2, Parafac2
from ._symmetric_cp import symmetric_parafac_power_iteration, symmetric_power_iteration, SymmetricCP
from ._cp_power import parafac_power_iteration, power_iteration, CPPower
diff --git a/tensorly/decomposition/parafac2.py b/tensorly/decomposition/_parafac2.py
similarity index 76%
rename from tensorly/decomposition/parafac2.py
rename to tensorly/decomposition/_parafac2.py
index 82fed36..eb0cadf 100644
--- a/tensorly/decomposition/parafac2.py
+++ b/tensorly/decomposition/_parafac2.py
@@ -285,95 +285,94 @@ def parafac2(tensor_slices, rank, n_iter_max=100, init='random', svd='numpy_svd'
class Parafac2(DecompositionMixin):
+ r"""PARAFAC2 decomposition [1]_ of a third order tensor via alternating least squares (ALS)
- def __init__(self, rank, n_iter_max=100, init='random', svd='numpy_svd', normalize_factors=False,
- tol=1e-8, random_state=None, verbose=False, n_iter_parafac=5):
- r"""PARAFAC2 decomposition [1]_ of a third order tensor via alternating least squares (ALS)
+ Computes a rank-`rank` PARAFAC2 decomposition of the third-order tensor defined by
+ `tensor_slices`. The decomposition is on the form :math:`(A [B_i] C)` such that the
+ i-th frontal slice, :math:`X_i`, of :math:`X` is given by
- Computes a rank-`rank` PARAFAC2 decomposition of the third-order tensor defined by
- `tensor_slices`. The decomposition is on the form :math:`(A [B_i] C)` such that the
- i-th frontal slice, :math:`X_i`, of :math:`X` is given by
+ .. math::
+
+ X_i = B_i diag(a_i) C^T,
+
+ where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries are equal to
+ the :math:`i`-th row of the :math:`I \times R` factor matrix :math:`A`, :math:`B_i`
+ is a :math:`J_i \times R` factor matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}`
+ is constant for all :math:`i`, and :math:`C` is a :math:`K \times R` factor matrix.
+ To compute this decomposition, we reformulate the expression for :math:`B_i` such that
- .. math::
-
- X_i = B_i diag(a_i) C^T,
-
- where :math:`diag(a_i)` is the diagonal matrix whose nonzero entries are equal to
- the :math:`i`-th row of the :math:`I \times R` factor matrix :math:`A`, :math:`B_i`
- is a :math:`J_i \times R` factor matrix such that the cross product matrix :math:`B_{i_1}^T B_{i_1}`
- is constant for all :math:`i`, and :math:`C` is a :math:`K \times R` factor matrix.
- To compute this decomposition, we reformulate the expression for :math:`B_i` such that
+ .. math::
- .. math::
+ B_i = P_i B,
- B_i = P_i B,
+ where :math:`P_i` is a :math:`J_i \times R` orthogonal matrix and :math:`B` is a
+ :math:`R \times R` matrix.
- where :math:`P_i` is a :math:`J_i \times R` orthogonal matrix and :math:`B` is a
- :math:`R \times R` matrix.
+ An alternative formulation of the PARAFAC2 decomposition is that the tensor element
+ :math:`X_{ijk}` is given by
- An alternative formulation of the PARAFAC2 decomposition is that the tensor element
- :math:`X_{ijk}` is given by
+ .. math::
- .. math::
+ X_{ijk} = \sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
+
+ with the same constraints hold for :math:`B_i` as above.
+
- X_{ijk} = \sum_{r=1}^R A_{ir} B_{ijr} C_{kr},
-
- with the same constraints hold for :math:`B_i` as above.
+ Parameters
+ ----------
+ tensor_slices : ndarray or list of ndarrays
+ Either a third order tensor or a list of second order tensors that may have different number of rows.
+ Note that the second mode factor matrices are allowed to change over the first mode, not the
+ third mode as some other implementations use (see note below).
+ rank : int
+ Number of components.
+ n_iter_max : int
+ Maximum number of iteration
+ init : {'svd', 'random', CPTensor, Parafac2Tensor}
+ Type of factor matrix initialization. See `initialize_factors`.
+ svd : str, default is 'numpy_svd'
+ function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
+ normalize_factors : bool (optional)
+ If True, aggregate the weights of each factor in a 1D-tensor
+ of shape (rank, ), which will contain the norms of the factors. Note that
+ there may be some inaccuracies in the component weights.
+ tol : float, optional
+ (Default: 1e-8) Relative reconstruction error tolerance. The
+ algorithm is considered to have found the global minimum when the
+ reconstruction error is less than `tol`.
+ random_state : {None, int, np.random.RandomState}
+ verbose : int, optional
+ Level of verbosity
+ n_iter_parafac: int, optional
+ Number of PARAFAC iterations to perform for each PARAFAC2 iteration
+
+ Returns
+ -------
+ Parafac2Tensor : (weight, factors, projection_matrices)
+ * weights : 1D array of shape (rank, )
+ all ones if normalize_factors is False (default),
+ weights of the (normalized) factors otherwise
+ * factors : List of factors of the CP decomposition element `i` is of shape
+ (tensor.shape[i], rank)
+ * projection_matrices : List of projection matrices used to create evolving
+ factors.
+ References
+ ----------
+ .. [1] Kiers, H.A.L., ten Berge, J.M.F. and Bro, R. (1999),
+ PARAFAC2—Part I. A direct fitting algorithm for the PARAFAC2 model.
+ J. Chemometrics, 13: 275-294.
- Parameters
- ----------
- tensor_slices : ndarray or list of ndarrays
- Either a third order tensor or a list of second order tensors that may have different number of rows.
- Note that the second mode factor matrices are allowed to change over the first mode, not the
- third mode as some other implementations use (see note below).
- rank : int
- Number of components.
- n_iter_max : int
- Maximum number of iteration
- init : {'svd', 'random', CPTensor, Parafac2Tensor}
- Type of factor matrix initialization. See `initialize_factors`.
- svd : str, default is 'numpy_svd'
- function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
- normalize_factors : bool (optional)
- If True, aggregate the weights of each factor in a 1D-tensor
- of shape (rank, ), which will contain the norms of the factors. Note that
- there may be some inaccuracies in the component weights.
- tol : float, optional
- (Default: 1e-8) Relative reconstruction error tolerance. The
- algorithm is considered to have found the global minimum when the
- reconstruction error is less than `tol`.
- random_state : {None, int, np.random.RandomState}
- verbose : int, optional
- Level of verbosity
- n_iter_parafac: int, optional
- Number of PARAFAC iterations to perform for each PARAFAC2 iteration
-
- Returns
- -------
- Parafac2Tensor : (weight, factors, projection_matrices)
- * weights : 1D array of shape (rank, )
- all ones if normalize_factors is False (default),
- weights of the (normalized) factors otherwise
- * factors : List of factors of the CP decomposition element `i` is of shape
- (tensor.shape[i], rank)
- * projection_matrices : List of projection matrices used to create evolving
- factors.
-
- References
- ----------
- .. [1] Kiers, H.A.L., ten Berge, J.M.F. and Bro, R. (1999),
- PARAFAC2—Part I. A direct fitting algorithm for the PARAFAC2 model.
- J. Chemometrics, 13: 275-294.
-
- Notes
- -----
- This formulation of the PARAFAC2 decomposition is slightly different from the one in [1]_.
- The difference lies in that here, the second mode changes over the first mode, whereas in
- [1]_, the second mode changes over the third mode. We made this change since that means
- that the function accept both lists of matrices and a single nd-array as input without
- any reordering of the modes.
- """
+ Notes
+ -----
+ This formulation of the PARAFAC2 decomposition is slightly different from the one in [1]_.
+ The difference lies in that here, the second mode changes over the first mode, whereas in
+ [1]_, the second mode changes over the third mode. We made this change since that means
+ that the function accept both lists of matrices and a single nd-array as input without
+ any reordering of the modes.
+ """
+ def __init__(self, rank, n_iter_max=100, init='random', svd='numpy_svd', normalize_factors=False,
+ tol=1e-8, random_state=None, verbose=False, n_iter_parafac=5):
self.rank = rank
self.n_iter_max=n_iter_max
self.init=init
|
tensorly/tensorly
|
e51fd09f3bacc77641aef76128330c479c5ce7d7
|
diff --git a/tensorly/decomposition/tests/test_parafac2.py b/tensorly/decomposition/tests/test_parafac2.py
index db925e1..8c6f982 100644
--- a/tensorly/decomposition/tests/test_parafac2.py
+++ b/tensorly/decomposition/tests/test_parafac2.py
@@ -7,7 +7,7 @@ import tensorly as tl
from ...random import check_random_state, random_parafac2
from ... import backend as T
from ...testing import assert_array_equal, assert_
-from ..parafac2 import parafac2, initialize_decomposition, _pad_by_zeros
+from .._parafac2 import parafac2, initialize_decomposition, _pad_by_zeros
from ...parafac2_tensor import parafac2_to_tensor, parafac2_to_slices
|
Enhancement: Improve naming consistency
Almost all decomposition implementations are stored in a file with the name `_{{decomposition_name}}`. The exceptions are PARAFAC2 and Robust PCA. Should we rename these modules to `_parafac2.py` and `_robust_decomposition.py`, or is there a reason for these exceptions?
|
0.0
|
e51fd09f3bacc77641aef76128330c479c5ce7d7
|
[
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2[True-random]",
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2[True-svd]",
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2[False-random]",
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2[False-svd]",
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2_slice_and_tensor_input",
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2_normalize_factors",
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2_init_valid",
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2_init_error",
"tensorly/decomposition/tests/test_parafac2.py::test_parafac2_to_tensor",
"tensorly/decomposition/tests/test_parafac2.py::test_pad_by_zeros"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-01 19:21:08+00:00
|
bsd-3-clause
| 5,850 |
|
tensorly__tensorly-320
|
diff --git a/tensorly/backend/core.py b/tensorly/backend/core.py
index ae9beb6..41c483a 100644
--- a/tensorly/backend/core.py
+++ b/tensorly/backend/core.py
@@ -1079,6 +1079,9 @@ class Backend(object):
S = np.sqrt(np.clip(S, 0, None))
S = np.clip(S, np.finfo(S.dtype).eps, None) # To avoid divide by zero warning on next line
V = np.dot(matrix.T.conj(), U * np.where(np.abs(S) <= np.finfo(S.dtype).eps, 0, 1/S)[None, :])
+ U, S, V = U[:, ::-1], S[::-1], V[:, ::-1]
+ V, R = np.linalg.qr(V)
+ V = V * (2*(np.diag(R) >= 0) - 1) # we can't use np.sign because np.sign(0) == 0
else:
S, V = scipy.sparse.linalg.eigsh(
np.dot(matrix.T.conj(), matrix), k=n_eigenvecs, which='LM', v0=v0
@@ -1086,9 +1089,11 @@ class Backend(object):
S = np.sqrt(np.clip(S, 0, None))
S = np.clip(S, np.finfo(S.dtype).eps, None)
U = np.dot(matrix, V) * np.where(np.abs(S) <= np.finfo(S.dtype).eps, 0, 1/S)[None, :]
+ U, S, V = U[:, ::-1], S[::-1], V[:, ::-1]
+ U, R = np.linalg.qr(U)
+ U = U * (2*(np.diag(R) >= 0) - 1)
# WARNING: here, V is still the transpose of what it should be
- U, S, V = U[:, ::-1], S[::-1], V[:, ::-1]
V = V.T.conj()
if flip:
diff --git a/tensorly/backend/pytorch_backend.py b/tensorly/backend/pytorch_backend.py
index 267cda3..b96e7d7 100644
--- a/tensorly/backend/pytorch_backend.py
+++ b/tensorly/backend/pytorch_backend.py
@@ -148,6 +148,10 @@ class PyTorchBackend(Backend):
def stack(arrays, axis=0):
return torch.stack(arrays, dim=axis)
+ @staticmethod
+ def diag(tensor, k=0):
+ return torch.diag(tensor, diagonal=k)
+
@staticmethod
def sort(tensor, axis, descending = False):
if axis is None:
@@ -212,7 +216,7 @@ class PyTorchBackend(Backend):
for name in ['float64', 'float32', 'int64', 'int32', 'complex128', 'complex64',
'is_tensor', 'ones', 'zeros', 'any', 'trace', 'cumsum', 'tensordot',
'zeros_like', 'reshape', 'eye', 'max', 'min', 'prod', 'abs', 'matmul',
- 'sqrt', 'sign', 'where', 'conj', 'diag', 'finfo', 'einsum', 'log2', 'sin', 'cos']:
+ 'sqrt', 'sign', 'where', 'conj', 'finfo', 'einsum', 'log2', 'sin', 'cos']:
PyTorchBackend.register_method(name, getattr(torch, name))
diff --git a/tensorly/backend/tensorflow_backend.py b/tensorly/backend/tensorflow_backend.py
index 3da3142..1d89fb5 100644
--- a/tensorly/backend/tensorflow_backend.py
+++ b/tensorly/backend/tensorflow_backend.py
@@ -191,7 +191,7 @@ _FUN_NAMES = [
(np.complex64, 'complex64'),
(tf.ones, 'ones'),
(tf.zeros, 'zeros'),
- (tf.linalg.tensor_diag, 'diag'),
+ (tf.linalg.diag, 'diag'),
(tf.zeros_like, 'zeros_like'),
(tf.eye, 'eye'),
(tf.reshape, 'reshape'),
|
tensorly/tensorly
|
5a6992ad14ec64d59a8b6d341ae14de98092687b
|
diff --git a/tensorly/tests/test_backend.py b/tensorly/tests/test_backend.py
index 04818a3..c73c24f 100644
--- a/tensorly/tests/test_backend.py
+++ b/tensorly/tests/test_backend.py
@@ -161,7 +161,7 @@ def test_svd():
assert_(left_orthogonality_error <= tol_orthogonality,
msg='Left eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}, for {} eigenenvecs, and size {}".'.format(
name, tl.get_backend(), n, s))
- right_orthogonality_error = T.norm(T.dot(T.transpose(fU), fU) - T.eye(n))
+ right_orthogonality_error = T.norm(T.dot(fV, T.transpose(fV)) - T.eye(n))
assert_(right_orthogonality_error <= tol_orthogonality,
msg='Right eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}, for {} eigenenvecs, and size {}".'.format(
name, tl.get_backend(), n, s))
@@ -180,6 +180,14 @@ def test_svd():
assert_(np.isfinite(T.to_numpy(U)).all(), msg="Left singular vectors are not finite")
assert_(np.isfinite(T.to_numpy(V)).all(), msg="Right singular vectors are not finite")
+ # Test orthonormality when max_dim > n_eigenvecs > matrix_rank
+ matrix = tl.dot(tl.randn((4, 2), seed=1), tl.randn((2, 4), seed=12))
+ U, S, V = tl.partial_svd(matrix, n_eigenvecs=3, random_state=0)
+ left_orthogonality_error = T.norm(T.dot(T.transpose(U), U) - T.eye(3))
+ assert_(left_orthogonality_error <= tol_orthogonality)
+ right_orthogonality_error = T.norm(T.dot(V, T.transpose(V)) - T.eye(3))
+ assert_(right_orthogonality_error <= tol_orthogonality)
+
# Test if partial_svd returns the same result for the same setting
matrix = T.tensor(np.random.random((20, 5)))
random_state = np.random.RandomState(0)
|
[BUG] singular vectors in `tl.partial_svd` are not orthonormal
When `n_eigenvecs` is greater than the rank of the input matrix, the returned singular vectors are not orthonormal. However, the result is still a valid low-rank decomposition (A = USV).
```python3
import tensorly as tl
import numpy as np
np.random.seed(0)
a = tl.dot(tl.randn((4, 2)), tl.randn((2, 4)))
u, s, v = tl.partial_svd(a, n_eigenvecs=3)
print(tl.dot(u.T, u))
```
```
[[ 1. -0. 0.]
[-0. 1. 0.]
[ 0. 0. 0.]]
```
U^T U ≠ I ☹️.
---
To fix this bug we could use the Gram–Schmidt process to extend the basis of singular vectors corresponding to the zero singular values.
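For reference, the patch above takes a QR route rather than explicit Gram–Schmidt. A standalone sketch of the idea (plain NumPy; the names are illustrative, not tensorly API):
```python
import numpy as np

def orthonormalize_columns(V):
    # Re-orthonormalize the columns of V via QR, flipping signs using the
    # diagonal of R so that columns that were already orthonormal are
    # preserved. np.sign is avoided on purpose: np.sign(0) == 0 would
    # drop the padded columns entirely.
    Q, R = np.linalg.qr(V)
    return Q * (2 * (np.diag(R) >= 0) - 1)

# Columns 0 and 1 are orthonormal; column 2 (a zero singular direction)
# gets replaced by a unit vector orthogonal to the first two.
V = np.array([[1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0],
              [0.0, 0.0, 0.0]])
W = orthonormalize_columns(V)
assert np.allclose(W.T @ W, np.eye(3))
```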
|
0.0
|
5a6992ad14ec64d59a8b6d341ae14de98092687b
|
[
"tensorly/tests/test_backend.py::test_svd"
] |
[
"tensorly/tests/test_backend.py::test_backend_and_tensorly_module_attributes",
"tensorly/tests/test_backend.py::test_tensor_creation",
"tensorly/tests/test_backend.py::test_svd_time",
"tensorly/tests/test_backend.py::test_randomized_range_finder",
"tensorly/tests/test_backend.py::test_shape",
"tensorly/tests/test_backend.py::test_ndim",
"tensorly/tests/test_backend.py::test_norm",
"tensorly/tests/test_backend.py::test_clip",
"tensorly/tests/test_backend.py::test_where",
"tensorly/tests/test_backend.py::test_lstsq",
"tensorly/tests/test_backend.py::test_qr",
"tensorly/tests/test_backend.py::test_prod",
"tensorly/tests/test_backend.py::test_index_update"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-08-28 06:51:58+00:00
|
bsd-3-clause
| 5,851 |
|
tensorly__tensorly-455
|
diff --git a/doc/modules/api.rst b/doc/modules/api.rst
index ad8c2a1..d6004d6 100644
--- a/doc/modules/api.rst
+++ b/doc/modules/api.rst
@@ -328,6 +328,10 @@ Note that these are currently experimental and may change in the future.
Parafac2
SymmetricCP
ConstrainedCP
+ TensorTrain
+ TensorRing
+ TensorTrainMatrix
+
Functions
---------
@@ -352,6 +356,7 @@ Functions
robust_pca
tensor_train
tensor_train_matrix
+ tensor_ring
parafac2
constrained_parafac
diff --git a/tensorly/backend/__init__.py b/tensorly/backend/__init__.py
index 35b44f5..326dde0 100644
--- a/tensorly/backend/__init__.py
+++ b/tensorly/backend/__init__.py
@@ -105,6 +105,7 @@ class BackendManager(types.ModuleType):
"asinh",
"acosh",
"atanh",
+ "partial_svd",
]
_attributes = [
"int64",
diff --git a/tensorly/backend/core.py b/tensorly/backend/core.py
index fb43e7e..7b4a1e1 100644
--- a/tensorly/backend/core.py
+++ b/tensorly/backend/core.py
@@ -1262,3 +1262,11 @@ class Backend(object):
def atanh(self, x):
"""Return the arctanh of x."""
return self.arctanh(x)
+
+ def partial_svd(self, *args, **kwargs):
+ msg = (
+ "partial_svd is no longer used. "
+ "Please use tensorly.tenalg.svd_interface instead, "
+ "it provides a unified interface to all available SVD implementations."
+ )
+ raise NotImplementedError(msg)
diff --git a/tensorly/decomposition/__init__.py b/tensorly/decomposition/__init__.py
index 61332d3..7998a94 100644
--- a/tensorly/decomposition/__init__.py
+++ b/tensorly/decomposition/__init__.py
@@ -13,8 +13,9 @@ from ._tucker import (
Tucker,
)
from .robust_decomposition import robust_pca
-from ._tt import TensorTrain, tensor_train, tensor_train_matrix
-from ._tr import tensor_ring
+from ._tt import tensor_train, tensor_train_matrix
+from ._tt import TensorTrain, TensorTrainMatrix
+from ._tr import tensor_ring, TensorRing
from ._parafac2 import parafac2, Parafac2
from ._symmetric_cp import (
symmetric_parafac_power_iteration,
diff --git a/tensorly/decomposition/_tt.py b/tensorly/decomposition/_tt.py
index 961c516..9929193 100644
--- a/tensorly/decomposition/_tt.py
+++ b/tensorly/decomposition/_tt.py
@@ -136,6 +136,40 @@ def tensor_train_matrix(tensor, rank, svd="truncated_svd", verbose=False):
class TensorTrain(DecompositionMixin):
+ """Decompose a tensor into a matrix in tt-format
+
+ Parameters
+ ----------
+ tensor : tensorized matrix
+ if your input matrix is of size (4, 9) and your tensorized_shape (2, 2, 3, 3)
+ then tensor should be tl.reshape(matrix, (2, 2, 3, 3))
+ rank : 'same', float or int tuple
+ - if 'same' creates a decomposition with the same number of parameters as `tensor`
+ - if float, creates a decomposition with `rank` x the number of parameters of `tensor`
+ - otherwise, the actual rank to be used, e.g. (1, rank_2, ..., 1) of size tensor.ndim//2. Note that boundary conditions dictate that the first rank = last rank = 1.
+ svd : str, default is 'truncated_svd'
+ function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
+ verbose : boolean, optional
+ level of verbosity
+
+ Returns
+ -------
+ tt_matrix
+ """
+
+ def __init__(self, rank, svd="truncated_svd", verbose=False):
+ self.rank = rank
+ self.svd = svd
+ self.verbose = verbose
+
+ def fit_transform(self, tensor):
+ self.decomposition_ = tensor_train(
+ tensor, rank=self.rank, svd=self.svd, verbose=self.verbose
+ )
+ return self.decomposition_
+
+
+class TensorTrainMatrix(DecompositionMixin):
"""TT decomposition via recursive SVD
Decomposes `input_tensor` into a sequence of order-3 tensors (factors)
@@ -169,7 +203,7 @@ class TensorTrain(DecompositionMixin):
self.verbose = verbose
def fit_transform(self, tensor):
- self.decomposition_ = tensor_train(
+ self.decomposition_ = tensor_train_matrix(
tensor, rank=self.rank, svd=self.svd, verbose=self.verbose
)
return self.decomposition_
|
tensorly/tensorly
|
a0d58621349a0100fef6ada87ead5646ba37e5f4
|
diff --git a/tensorly/decomposition/tests/test_tt_decomposition.py b/tensorly/decomposition/tests/test_tt_decomposition.py
index 48675da..1bf7322 100644
--- a/tensorly/decomposition/tests/test_tt_decomposition.py
+++ b/tensorly/decomposition/tests/test_tt_decomposition.py
@@ -1,6 +1,6 @@
import pytest
import tensorly as tl
-from .._tt import tensor_train, tensor_train_matrix, TensorTrain
+from .._tt import tensor_train, tensor_train_matrix, TensorTrain, TensorTrainMatrix
from ...tt_matrix import tt_matrix_to_tensor
from ...random import random_tt
from ...testing import (
@@ -89,10 +89,14 @@ def test_tensor_train(monkeypatch):
# TODO: Remove once MXNet supports transpose for > 6th order tensors
@skip_mxnet
-def test_tensor_train_matrix():
+def test_tensor_train_matrix(monkeypatch):
"""Test for tensor_train_matrix decomposition"""
tensor = random_tt((2, 2, 2, 3, 3, 3), rank=2, full=True)
tt = tensor_train_matrix(tensor, 10)
tt_rec = tt_matrix_to_tensor(tt)
assert_array_almost_equal(tensor, tt_rec, decimal=4)
+
+ assert_class_wrapper_correctly_passes_arguments(
+ monkeypatch, tensor_train_matrix, TensorTrainMatrix, ignore_args={}, rank=3
+ )
|
Add a deprecation for tl.partial_svd to suggest tl.tenalg.svd_interface
The issue in the tests comes from the fact that we changed the SVD interface in #429; the best fix would be to use `tensorly.tenalg.svd_interface`. @aarmey perhaps we should deprecate `tl.partial_svd`, or at least raise an error if users try to use it, to let them know they should switch to the new interface?
_Originally posted by @JeanKossaifi in https://github.com/tensorly/tensorly/issues/411#issuecomment-1217122493_
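A sketch of the migration the new error message points users to (assuming a tensorly version where `tenalg.svd_interface` is available, i.e. after #429):
```python
import tensorly as tl
from tensorly.tenalg import svd_interface

matrix = tl.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])

# Old entry point -- now raises NotImplementedError with a pointer to
# the unified interface:
#   U, S, V = tl.partial_svd(matrix, n_eigenvecs=2)
U, S, V = svd_interface(matrix, n_eigenvecs=2)
```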
|
0.0
|
a0d58621349a0100fef6ada87ead5646ba37e5f4
|
[
"tensorly/decomposition/tests/test_tt_decomposition.py::test_tensor_train",
"tensorly/decomposition/tests/test_tt_decomposition.py::test_tensor_train_matrix"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-31 20:54:30+00:00
|
bsd-3-clause
| 5,852 |
|
terean-dspd__numbers-2-rus-3
|
diff --git a/num2rus/converter.py b/num2rus/converter.py
index 89d38e1..212840d 100644
--- a/num2rus/converter.py
+++ b/num2rus/converter.py
@@ -131,7 +131,7 @@ NUMBERS = {
40: "сорок",
50: "пятьдесят",
60: "шестьдесят",
- 70: "семьдесять",
+ 70: "семьдесят",
80: "восемьдесят",
90: "девяносто",
},
diff --git a/num2rus/main.py b/num2rus/main.py
index e586db5..6049e24 100644
--- a/num2rus/main.py
+++ b/num2rus/main.py
@@ -30,7 +30,7 @@ def chopper(num: int) -> Tuple[str, str]:
marks them as
`миллиард`, `миллион`, `тысяча`, `сотня`, `единица`, `десятка`
"""
- num_str = str(num)
+ num_str = str(num) # '20'
while len(num_str) > 0:
if 13 > len(num_str) >= 10:
step = len(num_str) - 9
@@ -50,9 +50,12 @@ def chopper(num: int) -> Tuple[str, str]:
if len(num_str) == 1:
yield num_str, 'единица'
break
- if len(num_str) == 2:
+ if len(num_str) == 2 and num_str[0] == '1':
yield num_str, 'десятка'
break
+ else:
+ yield num_str, 'сотня'
+ break
def decimal_parser(number_str: str, zero_on: bool = True) -> Tuple[str, str]:
@@ -157,7 +160,6 @@ def converter(number: float, zero_on: bool = True) -> str:
dec_str = "{0:.2f}".format(decimal - integet_part)
decimal_part: str = dec_str.split('.')[1]
result = ''
- tl_g = ''
for number, size in chopper(integet_part):
string, tl, rub = main_parser(number, size)
result += string + tl + ' '
@@ -169,4 +171,4 @@ def converter(number: float, zero_on: bool = True) -> str:
if __name__ == "__main__":
# num = input()
- converter(34102)
+ converter(20)
|
terean-dspd/numbers-2-rus
|
a6ee2b20be1c34b3b306591e2fa6e1fb68771326
|
diff --git a/num2rus/tests.py b/num2rus/tests.py
index d3ac079..02acd9b 100644
--- a/num2rus/tests.py
+++ b/num2rus/tests.py
@@ -27,6 +27,43 @@ class Testconverter(unittest.TestCase):
result = converter(10, zero_on=False)
self.assertEqual(result, 'десять рублей')
+ def test_20(self):
+ result = converter(20, zero_on=False)
+ self.assertEqual(result, 'двадцать рублей')
+
+ def test_21(self):
+ result = converter(21, zero_on=False)
+ self.assertEqual(result, 'двадцать один рубль')
+
+ def test_25(self):
+ result = converter(25, zero_on=False)
+ self.assertEqual(result, 'двадцать пять рублей')
+
+ def test_30(self):
+ result = converter(30, zero_on=False)
+ self.assertEqual(result, 'тридцать рублей')
+
+ def test_33(self):
+ result = converter(33, zero_on=False)
+ self.assertEqual(result, 'тридцать три рубля')
+
+ def test_43(self):
+ result = converter(43, zero_on=False)
+ self.assertEqual(result, 'сорок три рубля')
+
+ def test_50(self):
+ result = converter(50, zero_on=False)
+ self.assertEqual(result, 'пятьдесят рублей')
+ def test_75(self):
+ result = converter(75, zero_on=False)
+ self.assertEqual(result, 'семьдесят пять рублей')
+
+ def test_90(self):
+ result = converter(90, zero_on=False)
+ self.assertEqual(result, 'девяносто рублей')
+ def test_99(self):
+ result = converter(99, zero_on=False)
+ self.assertEqual(result, 'девяносто девять рублей')
def test_100(self):
result = converter(100, zero_on=False)
self.assertEqual(result, 'сто рублей')
@@ -246,9 +283,6 @@ class TestconverterNonZeroKops(unittest.TestCase):
result = converter(123.40)
self.assertEqual(result, 'сто двадцать три рубля сорок копеек')
- def test_1_z_123_40(self):
- result = converter(123.40)
- self.assertEqual(result, 'сто двадцать три рубля сорок копеек')
def test_1_z_133_41(self):
result = converter(133.41)
|
Conversion error for numbers from 20 to 99

|
0.0
|
a6ee2b20be1c34b3b306591e2fa6e1fb68771326
|
[
"num2rus/tests.py::Testconverter::test_20",
"num2rus/tests.py::Testconverter::test_21",
"num2rus/tests.py::Testconverter::test_25",
"num2rus/tests.py::Testconverter::test_30",
"num2rus/tests.py::Testconverter::test_33",
"num2rus/tests.py::Testconverter::test_43",
"num2rus/tests.py::Testconverter::test_50",
"num2rus/tests.py::Testconverter::test_75",
"num2rus/tests.py::Testconverter::test_90",
"num2rus/tests.py::Testconverter::test_99"
] |
[
"num2rus/tests.py::Testconverter::test_1",
"num2rus/tests.py::Testconverter::test_10",
"num2rus/tests.py::Testconverter::test_100",
"num2rus/tests.py::Testconverter::test_100000",
"num2rus/tests.py::Testconverter::test_100100",
"num2rus/tests.py::Testconverter::test_100101",
"num2rus/tests.py::Testconverter::test_100102",
"num2rus/tests.py::Testconverter::test_100_101_102",
"num2rus/tests.py::Testconverter::test_101",
"num2rus/tests.py::Testconverter::test_101102",
"num2rus/tests.py::Testconverter::test_101_101_102",
"num2rus/tests.py::Testconverter::test_111",
"num2rus/tests.py::Testconverter::test_11100",
"num2rus/tests.py::Testconverter::test_1120",
"num2rus/tests.py::Testconverter::test_115",
"num2rus/tests.py::Testconverter::test_120",
"num2rus/tests.py::Testconverter::test_1_100_101",
"num2rus/tests.py::Testconverter::test_1_100_102",
"num2rus/tests.py::Testconverter::test_1_101_102",
"num2rus/tests.py::Testconverter::test_2",
"num2rus/tests.py::Testconverter::test_3",
"num2rus/tests.py::Testconverter::test_34000",
"num2rus/tests.py::Testconverter::test_34102",
"num2rus/tests.py::Testconverter::test_34103",
"num2rus/tests.py::Testconverter::test_5120",
"num2rus/tests.py::Testconverter::test_7",
"num2rus/tests.py::TestconverterZeroKops::test_1",
"num2rus/tests.py::TestconverterZeroKops::test_10",
"num2rus/tests.py::TestconverterZeroKops::test_100",
"num2rus/tests.py::TestconverterZeroKops::test_100000",
"num2rus/tests.py::TestconverterZeroKops::test_100100",
"num2rus/tests.py::TestconverterZeroKops::test_100101",
"num2rus/tests.py::TestconverterZeroKops::test_100102",
"num2rus/tests.py::TestconverterZeroKops::test_100_101_102",
"num2rus/tests.py::TestconverterZeroKops::test_101",
"num2rus/tests.py::TestconverterZeroKops::test_101102",
"num2rus/tests.py::TestconverterZeroKops::test_101_101_102",
"num2rus/tests.py::TestconverterZeroKops::test_111",
"num2rus/tests.py::TestconverterZeroKops::test_11100",
"num2rus/tests.py::TestconverterZeroKops::test_1120",
"num2rus/tests.py::TestconverterZeroKops::test_115",
"num2rus/tests.py::TestconverterZeroKops::test_120",
"num2rus/tests.py::TestconverterZeroKops::test_1_100_101",
"num2rus/tests.py::TestconverterZeroKops::test_1_100_102",
"num2rus/tests.py::TestconverterZeroKops::test_1_101_102",
"num2rus/tests.py::TestconverterZeroKops::test_2",
"num2rus/tests.py::TestconverterZeroKops::test_3",
"num2rus/tests.py::TestconverterZeroKops::test_34000",
"num2rus/tests.py::TestconverterZeroKops::test_34102",
"num2rus/tests.py::TestconverterZeroKops::test_34103",
"num2rus/tests.py::TestconverterZeroKops::test_5120",
"num2rus/tests.py::TestconverterZeroKops::test_7",
"num2rus/tests.py::TestconverterNonZeroKops::test_10_z_21",
"num2rus/tests.py::TestconverterNonZeroKops::test_1_z_01",
"num2rus/tests.py::TestconverterNonZeroKops::test_1_z_10",
"num2rus/tests.py::TestconverterNonZeroKops::test_1_z_123_40",
"num2rus/tests.py::TestconverterNonZeroKops::test_1_z_131_40",
"num2rus/tests.py::TestconverterNonZeroKops::test_1_z_133_41",
"num2rus/tests.py::TestconverterNonZeroKops::test_2_z_02",
"num2rus/tests.py::TestconverterNonZeroKops::test_3_z_07",
"num2rus/tests.py::TestconverterNonZeroKops::test_3_z_08",
"num2rus/tests.py::TestconverterNonZeroKops::test_7_z_11"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-01-29 19:32:57+00:00
|
mit
| 5,853 |
|
tern-tools__tern-764
|
diff --git a/tern/analyze/common.py b/tern/analyze/common.py
index df4889a..a9de792 100644
--- a/tern/analyze/common.py
+++ b/tern/analyze/common.py
@@ -31,9 +31,11 @@ logger = logging.getLogger(constants.logger_name)
def get_shell_commands(shell_command_line):
- '''Given a shell command line, get a list of Command objects'''
+ '''Given a shell command line, get a list of Command objects and report on
+ branch statements'''
statements = general.split_command(shell_command_line)
command_list = []
+ branch_report = ''
# traverse the statements, pick out the loop and commands.
for stat in statements:
if 'command' in stat:
@@ -43,7 +45,13 @@ def get_shell_commands(shell_command_line):
for st in loop_stat:
if 'command' in st:
command_list.append(Command(st['command']))
- return command_list
+ elif 'branch' in stat:
+ branch_report = branch_report + '\n'.join(stat['content']) + '\n\n'
+ if branch_report:
+ # add prefix
+ branch_report = '\nNon-deterministic branching statement: \n' + \
+ branch_report
+ return command_list, branch_report
def load_from_cache(layer, redo=False):
@@ -478,7 +486,7 @@ def filter_install_commands(shell_command_line):
3. Return installed command objects, and messages for ignored commands
and unrecognized commands'''
report = ''
- command_list = get_shell_commands(shell_command_line)
+ command_list, branch_report = get_shell_commands(shell_command_line)
for command in command_list:
command_lib.set_command_attrs(command)
ignore_msgs, filter1 = remove_ignored_commands(command_list)
@@ -487,7 +495,8 @@ def filter_install_commands(shell_command_line):
report = report + formats.ignored + ignore_msgs
if unrec_msgs:
report = report + formats.unrecognized + unrec_msgs
-
+ if branch_report:
+ report = report + branch_report
return consolidate_commands(filter2), report
|
tern-tools/tern
|
044dc470ec5be8aacbc085a5ae307c608ff13255
|
diff --git a/tests/test_analyze_common.py b/tests/test_analyze_common.py
index e40445c..82aba50 100644
--- a/tests/test_analyze_common.py
+++ b/tests/test_analyze_common.py
@@ -32,10 +32,18 @@ class TestAnalyzeCommon(unittest.TestCase):
del self.test_dockerfile
def testGetShellCommands(self):
- command = common.get_shell_commands("yum install nfs-utils")
+ command, _ = common.get_shell_commands("yum install nfs-utils")
self.assertEqual(type(command), list)
self.assertEqual(len(command), 1)
self.assertEqual(command[0].options, self.command1.options)
+ # test on branching command
+ branching_script = "if [ -z $var ]; then yum install nfs-utils; fi"
+ branch_command, report = common.get_shell_commands(branching_script)
+ self.assertEqual(type(branch_command), list)
+ # we will ignore branching command, so len should be 0
+ self.assertEqual(len(branch_command), 0)
+ # and the report should not be None
+ self.assertTrue(report)
def testLoadFromCache(self):
'''Given a layer object, populate the given layer in case the cache isn't empty'''
|
Report RUN command statements that are non-deterministic like if and case statements
**Describe the Feature**
Currently, the report notices just say that `if`, `case` and `for` statements are unrecognized. It would be nice to add notices for non-deterministic branching statements like `if` and `case`, since which branch is taken can only be known at build time.
**Implementation Changes**
The new shell script parser produces a dictionary that identifies `if` and `case` statements. We could just look at this object and extract the `if` and `case` statements to create notices for them, as in the sketch below.
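A condensed sketch of the approach the patch takes inside `get_shell_commands`, pulled out as a free function for illustration (`statements` stands in for the list of dicts returned by `general.split_command`):
```python
def collect_branch_report(statements):
    # Gather the raw text of 'branch' statements (if/case) so they can be
    # surfaced in the report instead of being silently dropped.
    report = ''
    for stat in statements:
        if 'branch' in stat:
            report += '\n'.join(stat['content']) + '\n\n'
    if report:
        report = '\nNon-deterministic branching statement: \n' + report
    return report

# Hand-written stand-in for the parser's output:
statements = [{'branch': 'if',
               'content': ['if [ -z $var ]; then yum install nfs-utils; fi']}]
print(collect_branch_report(statements))
```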
|
0.0
|
044dc470ec5be8aacbc085a5ae307c608ff13255
|
[
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetShellCommands"
] |
[
"tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithDifferentCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithSameCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testFilterInstallCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithInstallFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithRemoveFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFilesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadNoticesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadPackagesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithIgnoreFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithoutIgnoreFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithoutFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testSaveToCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithPackages",
"tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithoutPackages"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-11 01:09:53+00:00
|
bsd-2-clause
| 5,854 |
|
tern-tools__tern-768
|
diff --git a/tern/analyze/common.py b/tern/analyze/common.py
index df4889a..e5835c9 100644
--- a/tern/analyze/common.py
+++ b/tern/analyze/common.py
@@ -31,9 +31,11 @@ logger = logging.getLogger(constants.logger_name)
def get_shell_commands(shell_command_line):
- '''Given a shell command line, get a list of Command objects'''
+ '''Given a shell command line, get a list of Command objects and report on
+ branch statements'''
statements = general.split_command(shell_command_line)
command_list = []
+ branch_report = ''
# traverse the statements, pick out the loop and commands.
for stat in statements:
if 'command' in stat:
@@ -43,7 +45,13 @@ def get_shell_commands(shell_command_line):
for st in loop_stat:
if 'command' in st:
command_list.append(Command(st['command']))
- return command_list
+ elif 'branch' in stat:
+ branch_report = branch_report + '\n'.join(stat['content']) + '\n\n'
+ if branch_report:
+ # add prefix
+ branch_report = '\nNon-deterministic branching statement: \n' + \
+ branch_report
+ return command_list, branch_report
def load_from_cache(layer, redo=False):
@@ -201,7 +209,7 @@ def get_os_release(base_layer):
return pretty_name.strip('"')
-def collate_list_metadata(shell, listing):
+def collate_list_metadata(shell, listing, work_dir):
'''Given the shell and the listing for the package manager, collect
metadata that gets returned as a list'''
pkg_dict = {}
@@ -212,7 +220,7 @@ def collate_list_metadata(shell, listing):
return pkg_dict, msgs, warnings
for item in command_lib.base_keys:
if item in listing.keys():
- items, msg = command_lib.get_pkg_attr_list(shell, listing[item])
+ items, msg = command_lib.get_pkg_attr_list(shell, listing[item], work_dir)
msgs = msgs + msg
pkg_dict.update({item: items})
else:
@@ -290,7 +298,7 @@ def get_deb_package_licenses(deb_copyrights):
return deb_licenses
-def add_base_packages(image_layer, binary, shell):
+def add_base_packages(image_layer, binary, shell, work_dir=None):
'''Given the image layer, the binary to invoke and shell:
1. get the listing from the base.yml
2. Invoke any commands against the base layer
@@ -313,7 +321,7 @@ def add_base_packages(image_layer, binary, shell):
image_layer.origins.add_notice_to_origins(
origin_layer, Notice(snippet_msg, 'info'))
# get all the packages in the base layer
- pkg_dict, invoke_msg, warnings = collate_list_metadata(shell, listing)
+ pkg_dict, invoke_msg, warnings = collate_list_metadata(shell, listing, work_dir)
if listing.get("pkg_format") == "deb":
pkg_dict["pkg_licenses"] = get_deb_package_licenses(
@@ -338,7 +346,7 @@ def add_base_packages(image_layer, binary, shell):
listing_key=binary), 'error'))
-def fill_package_metadata(pkg_obj, pkg_listing, shell):
+def fill_package_metadata(pkg_obj, pkg_listing, shell, work_dir):
'''Given a Package object and the Package listing from the command
library, fill in the attribute value returned from looking up the
data and methods of the package listing.
@@ -351,7 +359,7 @@ def fill_package_metadata(pkg_obj, pkg_listing, shell):
pkg_listing, 'version')
if version_listing:
version_list, invoke_msg = command_lib.get_pkg_attr_list(
- shell, version_listing, package_name=pkg_obj.name)
+ shell, version_listing, work_dir, package_name=pkg_obj.name)
if version_list:
pkg_obj.version = version_list[0]
else:
@@ -365,7 +373,7 @@ def fill_package_metadata(pkg_obj, pkg_listing, shell):
pkg_listing, 'license')
if license_listing:
license_list, invoke_msg = command_lib.get_pkg_attr_list(
- shell, license_listing, package_name=pkg_obj.name)
+ shell, license_listing, work_dir, package_name=pkg_obj.name)
if license_list:
pkg_obj.license = license_list[0]
else:
@@ -379,7 +387,7 @@ def fill_package_metadata(pkg_obj, pkg_listing, shell):
pkg_listing, 'proj_url')
if url_listing:
url_list, invoke_msg = command_lib.get_pkg_attr_list(
- shell, url_listing, package_name=pkg_obj.name)
+ shell, url_listing, work_dir, package_name=pkg_obj.name)
if url_list:
pkg_obj.proj_url = url_list[0]
else:
@@ -390,7 +398,7 @@ def fill_package_metadata(pkg_obj, pkg_listing, shell):
origin_str, Notice(listing_msg, 'warning'))
-def get_package_dependencies(package_listing, package_name, shell):
+def get_package_dependencies(package_listing, package_name, shell, work_dir=None):
'''The package listing is the result of looking up the command name in the
command library. Given this listing, the package name and the shell
return a list of package dependency names'''
@@ -398,7 +406,7 @@ def get_package_dependencies(package_listing, package_name, shell):
package_listing, 'deps')
if deps_listing:
deps_list, invoke_msg = command_lib.get_pkg_attr_list(
- shell, deps_listing, package_name=package_name)
+ shell, deps_listing, work_dir, package_name=package_name)
if deps_list:
return list(set(deps_list)), ''
return [], invoke_msg
@@ -457,6 +465,7 @@ def consolidate_commands(command_list):
new_list.append(command_list.pop(0))
while command_list:
+ # match the first command with its following commands.
first = command_list.pop(0)
for _ in range(0, len(command_list)):
second = command_list.pop(0)
@@ -465,8 +474,11 @@ def consolidate_commands(command_list):
new_list.append(second)
else:
if not first.merge(second):
- command_list.append(first)
- new_list.append(first)
+ # Unable to merge second, we should keep second command.
+ command_list.append(second)
+ # after trying to merge with all following commands, add first command
+ # to the new_dict.
+ new_list.append(first)
return new_list
@@ -478,7 +490,7 @@ def filter_install_commands(shell_command_line):
3. Return installed command objects, and messages for ignored commands
and unrecognized commands'''
report = ''
- command_list = get_shell_commands(shell_command_line)
+ command_list, branch_report = get_shell_commands(shell_command_line)
for command in command_list:
command_lib.set_command_attrs(command)
ignore_msgs, filter1 = remove_ignored_commands(command_list)
@@ -487,11 +499,12 @@ def filter_install_commands(shell_command_line):
report = report + formats.ignored + ignore_msgs
if unrec_msgs:
report = report + formats.unrecognized + unrec_msgs
-
+ if branch_report:
+ report = report + branch_report
return consolidate_commands(filter2), report
-def add_snippet_packages(image_layer, command, pkg_listing, shell):
+def add_snippet_packages(image_layer, command, pkg_listing, shell, work_dir):
'''Given an image layer object, a command object, the package listing
and the shell used to invoke commands, add package metadata to the layer
object. We assume the filesystem is already mounted and ready
@@ -524,7 +537,7 @@ def add_snippet_packages(image_layer, command, pkg_listing, shell):
# get package metadata for each package name
for pkg_name in unique_pkgs:
pkg = Package(pkg_name)
- fill_package_metadata(pkg, pkg_invoke, shell)
+ fill_package_metadata(pkg, pkg_invoke, shell, work_dir)
image_layer.add_package(pkg)
diff --git a/tern/analyze/docker/analyze.py b/tern/analyze/docker/analyze.py
index 6c2bccc..b112edb 100644
--- a/tern/analyze/docker/analyze.py
+++ b/tern/analyze/docker/analyze.py
@@ -106,7 +106,12 @@ def analyze_subsequent_layers(image_obj, shell, master_list, redo, dfobj=None,
dfile_lock=False):
# get packages for subsequent layers
curr_layer = 1
+ work_dir = None
while curr_layer < len(image_obj.layers): # pylint:disable=too-many-nested-blocks
+ # If workdir changes, update value accordingly
+ # so we can later execute base.yml commands from the workdir.
+ if image_obj.layers[curr_layer].get_layer_workdir() is not None:
+ work_dir = image_obj.layers[curr_layer].get_layer_workdir()
# if there is no shell, try to see if it exists in the current layer
if not shell:
shell = common.get_shell(image_obj.layers[curr_layer])
@@ -126,7 +131,7 @@ def analyze_subsequent_layers(image_obj, shell, master_list, redo, dfobj=None,
if isinstance(pkg_listing, str):
try:
common.add_base_packages(
- image_obj.layers[curr_layer], pkg_listing, shell)
+ image_obj.layers[curr_layer], pkg_listing, shell, work_dir)
except KeyboardInterrupt:
logger.critical(errors.keyboard_interrupt)
abort_analysis()
@@ -134,7 +139,7 @@ def analyze_subsequent_layers(image_obj, shell, master_list, redo, dfobj=None,
try:
common.add_snippet_packages(
image_obj.layers[curr_layer], command, pkg_listing,
- shell)
+ shell, work_dir)
except KeyboardInterrupt:
logger.critical(errors.keyboard_interrupt)
abort_analysis()
diff --git a/tern/classes/image_layer.py b/tern/classes/image_layer.py
index 46a74ad..484b802 100644
--- a/tern/classes/image_layer.py
+++ b/tern/classes/image_layer.py
@@ -327,3 +327,10 @@ class ImageLayer:
file_data.set_checksum('sha256', attrs_tuple[2])
file_data.extattrs = attrs_tuple[0]
self.add_file(file_data)
+
+ def get_layer_workdir(self):
+ # If the layer is created by a WORKDIR command then return the workdir
+ match = re.search(r"\bWORKDIR\ (\/\w+)+\b", self.created_by)
+ if match:
+ return match.group().split()[1]
+ return None
diff --git a/tern/command_lib/command_lib.py b/tern/command_lib/command_lib.py
index c28c1b1..d3dc999 100644
--- a/tern/command_lib/command_lib.py
+++ b/tern/command_lib/command_lib.py
@@ -205,7 +205,7 @@ def invoke_in_rootfs(snippet_list, shell, package=''):
raise
-def get_pkg_attr_list(shell, attr_dict, package_name='', chroot=True,
+def get_pkg_attr_list(shell, attr_dict, work_dir, package_name='', chroot=True, # pylint:disable=too-many-arguments
override=''):
'''The command library has package attributes listed like this:
{invoke: {1: {container: [command1, command2]},
@@ -225,6 +225,9 @@ def get_pkg_attr_list(shell, attr_dict, package_name='', chroot=True,
if 'container' in attr_dict['invoke'][step].keys():
snippet_list = attr_dict['invoke'][step]['container']
result = ''
+ # If work_dir exist cd into it
+ if work_dir is not None:
+ snippet_list.insert(0, 'cd ' + work_dir)
# if we need to run in a chroot environment
if chroot:
try:
diff --git a/tern/tools/verify_invoke.py b/tern/tools/verify_invoke.py
index 95a6265..d016430 100644
--- a/tern/tools/verify_invoke.py
+++ b/tern/tools/verify_invoke.py
@@ -28,6 +28,15 @@ def look_up_lib(keys):
return subd
+def get_workdir(image_obj):
+ # get the workdir from the image config where the commands will be executed
+ config = image_obj.get_image_config(image_obj.get_image_manifest())
+ workdir = config['config']['WorkingDir']
+ if workdir == '':
+ return None
+ return workdir
+
+
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='''
@@ -78,8 +87,9 @@ if __name__ == '__main__':
info_dict = look_up_lib(args.keys)
# try to invoke the commands
try:
+ work_dir = get_workdir(image_obj)
result = command_lib.get_pkg_attr_list(
- args.shell, info_dict, args.package)
+ args.shell, info_dict, work_dir, args.package)
print('Output list: ' + ' '.join(result[0]))
print('Error messages: ' + result[1])
print('Number of elements: ' + str(len(result[0])))
|
tern-tools/tern
|
044dc470ec5be8aacbc085a5ae307c608ff13255
|
diff --git a/tests/test_analyze_common.py b/tests/test_analyze_common.py
index e40445c..82aba50 100644
--- a/tests/test_analyze_common.py
+++ b/tests/test_analyze_common.py
@@ -32,10 +32,18 @@ class TestAnalyzeCommon(unittest.TestCase):
del self.test_dockerfile
def testGetShellCommands(self):
- command = common.get_shell_commands("yum install nfs-utils")
+ command, _ = common.get_shell_commands("yum install nfs-utils")
self.assertEqual(type(command), list)
self.assertEqual(len(command), 1)
self.assertEqual(command[0].options, self.command1.options)
+ # test on branching command
+ branching_script = "if [ -z $var ]; then yum install nfs-utils; fi"
+ branch_command, report = common.get_shell_commands(branching_script)
+ self.assertEqual(type(branch_command), list)
+ # we will ignore branching command, so len should be 0
+ self.assertEqual(len(branch_command), 0)
+ # and the report should not be None
+ self.assertTrue(report)
def testLoadFromCache(self):
'''Given a layer object, populate the given layer in case the cache isn't empty'''
|
Tern has no concept of WORKDIR directory (required to collect go module info)
**Describe the bug**
Most golang Dockerfiles will have a `WORKDIR` where the go modules get installed, e.g.:
```
# Start from the latest golang base image
FROM golang:1.14-alpine as builder
# Set the Current Working Directory inside the container
WORKDIR /app
# Copy go mod and sum files
COPY go.mod go.sum ./
# Download all dependencies. Dependencies will be cached if the go.mod and go.sum files are not changed
RUN go mod download
# Copy the source from the current directory to the Working Directory inside the container
COPY . .
# Build the Go app
RUN CGO_ENABLED=0 GOOS=linux go build -v -a -installsuffix cgo -o swag cmd/swag/main.go
```
If a `WORKDIR` is established, all subsequent `RUN`, `CMD`, `ADD`, `COPY`, or `ENTRYPOINT` commands will be executed in that `WORKDIR`. Tern currently has no concept of a `WORKDIR`. This is an issue when it comes to analyzing the go modules in a docker image, because `go list` only works from the `WORKDIR` (where `go.mod` lives), compared to most package manager utilities that are universally available in the image. The command `go list -m all | tail -n +2 | cut -d ' ' -f1`, which attempts to collect the module names, will return `Command failed. go list -m: not using modules` if not executed from the proper `WORKDIR` directory. However, if you change the `base.yml` command to cd into the `WORKDIR` first, the command executes without errors. cd-ing into the `WORKDIR` directory is not something we can do from `base.yml` because we won't know the name of the `WORKDIR` directory and there is currently no way to reference it.
**To Reproduce**
Steps to reproduce the behavior:
1. Add `go` package manager to base.yml and snippets.yml.
2. Run tern on a go image (similar to Dockerfile above)
3. If you change `base.yml` to cd into the `WORKDIR` first by running `cd /app; /usr/local/go/bin/go list -m all | tail -n +2 | cut -d ' ' -f1` to collect go modules, the command will work and Tern will output the go modules in the report. If you only use `/usr/local/go/bin/go list -m all | tail -n +2 | cut -d ' ' -f1` the command will fail.
4. See error/output below.
**Error in terminal**
Output when you cd into the WORKDIR first in `base.yml`:
```
File licenses found in Layer: None
Packages found in Layer: github.com/BurntSushi/toml-v0.3.1, github.com/KyleBanks/depth-v1.2.1,
github.com/PuerkitoBio/purell-v1.1.1, github.com/PuerkitoBio/urlesc-v0.0.0-20170810143723-de5bf2ad4578,
github.com/alecthomas/template-v0.0.0-20190718012654-fb15b899a751, github.com/cpuguy83/go-md2man/v2-
v2.0.0-20190314233015-f79a8a8ca69d, github.com/davecgh/go-spew-v1.1.1, github.com/ghodss/yaml-v1.0.0,
github.com/gin-contrib/gzip-v0.0.1, github.com/gin-contrib/sse-v0.1.0, github.com/gin-gonic/gin-v1.4.0, github.com/go-
openapi/jsonpointer-v0.19.3, github.com/go-openapi/jsonreference-v0.19.3, github.com/go-openapi/spec-v0.19.4,
github.com/go-openapi/swag-v0.19.5, github.com/golang/protobuf-v1.3.1, github.com/json-iterator/go-v1.1.6, github.com/kr
/pretty-v0.1.0, github.com/kr/pty-v1.1.5, github.com/kr/text-v0.1.0, github.com/mailru/easyjson-v0.0.0-20190626092158-
b2ccc519800e, github.com/mattn/go-isatty-v0.0.8, github.com/modern-go/concurrent-v0.0.0-20180306012644-
bacd9c7ef1dd, github.com/modern-go/reflect2-v1.0.1, github.com/pkg/errors-v0.8.1, github.com/pmezard/go-difflib-v1.0.0,
github.com/russross/blackfriday/v2-v2.0.1, github.com/satori/go.uuid-v1.2.0, github.com/shopspring/decimal-v1.2.0,
github.com/shurcooL/sanitized_anchor_name-v1.0.0, github.com/stretchr/objx-v0.2.0, github.com/stretchr/testify-v1.4.0,
github.com/swaggo/files-v0.0.0-20190704085106-630677cd5c14, github.com/swaggo/gin-swagger-v1.2.0,
github.com/ugorji/go-v1.1.5-pre, github.com/ugorji/go/codec-v1.1.5-pre, github.com/urfave/cli-v1.20.0, github.com/urfave
/cli/v2-v2.1.1, golang.org/x/crypto-v0.0.0-20190611184440-5c40567a22f8, golang.org/x/net-v0.0.0-20190827160401-
ba9fcec4b297, golang.org/x/sync-v0.0.0-20190423024810-112230192c58, golang.org/x/sys-
v0.0.0-20190616124812-15dcb6c0061f, golang.org/x/text-v0.3.2, golang.org/x/tools-
v0.0.0-20190614205625-5aca471b1d59, gopkg.in/check.v1-v1.0.0-20180628173108-788fd7840127, gopkg.in/go-
playground/assert.v1-v1.2.1, gopkg.in/go-playground/validator.v8-v8.18.2, gopkg.in/yaml.v2-v2.2.2
Licenses found in Layer: None
```
Output when you don't `cd` into the `WORKDIR`:
```
2020-07-13 14:02:56,250 - DEBUG - rootfs - Running command: sudo unshare -pf --mount-proc=/home/rjudge/.tern/temp/mergedir/proc chroot /home/rjudge/.tern/temp/mergedir /bin/sh -c /usr/local/go/bin/go list -m all | tail -n +2 | cut -d ' ' -f1
2020-07-13 14:02:56,279 - ERROR - rootfs - Command failed. go list -m: not using modules
```
**Expected behavior**
Tern should be executing from the `WORKDIR` directory, if it exists.
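A sketch of the helper the patch adds to `ImageLayer` (simplified here to a free function): if a layer was created by a `WORKDIR` instruction, recover the directory from the layer's `created_by` string so later invoke commands can cd into it first.
```python
import re

def get_layer_workdir(created_by):
    # Return the directory from a 'WORKDIR /some/dir' creation command,
    # or None if the layer was not created by WORKDIR.
    match = re.search(r"\bWORKDIR\ (\/\w+)+\b", created_by)
    if match:
        return match.group().split()[1]
    return None

print(get_layer_workdir("/bin/sh -c #(nop) WORKDIR /app"))  # -> /app
print(get_layer_workdir("/bin/sh -c go mod download"))      # -> None
```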
|
0.0
|
044dc470ec5be8aacbc085a5ae307c608ff13255
|
[
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetShellCommands"
] |
[
"tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithDifferentCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithSameCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testFilterInstallCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithInstallFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithRemoveFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFilesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadNoticesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadPackagesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithIgnoreFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithoutIgnoreFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithoutFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testSaveToCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithPackages",
"tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithoutPackages"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-15 21:42:14+00:00
|
bsd-2-clause
| 5,855 |
|
terrencepreilly__darglint-24
|
diff --git a/darglint/integrity_checker.py b/darglint/integrity_checker.py
index 729993d..1d24c0f 100644
--- a/darglint/integrity_checker.py
+++ b/darglint/integrity_checker.py
@@ -184,7 +184,7 @@ class IntegrityChecker(object):
fun_type = self.function.return_type
doc_type = self.docstring.get_types(Sections.RETURNS_SECTION)
if not doc_type or isinstance(doc_type, list):
- doc_type = ''
+ doc_type = None
if fun_type is not None and doc_type is not None:
if fun_type != doc_type:
line_numbers = self.docstring.get_line_numbers(
|
terrencepreilly/darglint
|
3e8a1d23cf5f9a007094b34a984d5041631d2906
|
diff --git a/tests/test_integrity_checker.py b/tests/test_integrity_checker.py
index 53f6c8f..3d2b895 100644
--- a/tests/test_integrity_checker.py
+++ b/tests/test_integrity_checker.py
@@ -288,6 +288,40 @@ class IntegrityCheckerTestCase(TestCase):
self.assertEqual(error.expected, 'int')
self.assertEqual(error.actual, 'float')
+ def test_return_type_unchecked_if_not_defined_in_docstring(self):
+ program = '\n'.join([
+ 'def foo() -> str:',
+ ' """Just a foobar.',
+ '',
+ ' Returns:',
+ ' bar',
+ '',
+ ' """',
+ ' return "bar"',
+ ])
+ tree = ast.parse(program)
+ functions = get_function_descriptions(tree)
+ checker = IntegrityChecker()
+ checker.run_checks(functions[0])
+ self.assertEqual(len(checker.errors), 0)
+
+ def test_return_type_unchecked_if_not_defined_in_function(self):
+ program = '\n'.join([
+ 'def foo():',
+ ' """Just a foobar.',
+ '',
+ ' Returns:',
+ ' str: bar',
+ '',
+ ' """',
+ ' return "bar"',
+ ])
+ tree = ast.parse(program)
+ functions = get_function_descriptions(tree)
+ checker = IntegrityChecker()
+ checker.run_checks(functions[0])
+ self.assertEqual(len(checker.errors), 0)
+
def test_return_type_checked_if_defined_in_docstring_and_function(self):
program = '\n'.join([
'def update_model(x: dict) -> dict:',
|
I203 raised for return type annotations.
`darglint` raises an error (`I203 Return type mismatch: ~Return: expected str but was `) when a return type annotation is provided without a declared docstring return type. This is likely an oversight; the check should only fire if _both_ a type annotation and a docstring type declaration are present.
Failure example:
```
def foo() -> str:
    """Just a standard foobar.

    Returns: Bar.
    """
    return "bar"
```
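The patch boils down to making the "no docstring type" sentinel consistent: an absent docstring return type must be `None` rather than `''`, so the existing `is not None` guard actually skips the comparison. A runnable sketch of the corrected guard (a free-function simplification of the logic in `integrity_checker.py`):
```python
def return_type_mismatch(fun_type, doc_type):
    # Only compare when BOTH the annotation and the docstring declare a
    # return type; '' was falsy but not None, so the guard never skipped.
    if not doc_type or isinstance(doc_type, list):
        doc_type = None
    return fun_type is not None and doc_type is not None and fun_type != doc_type

assert return_type_mismatch('str', None) is False  # annotation only: no I203
assert return_type_mismatch('str', 'int') is True  # both declared, mismatch: I203
```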
|
0.0
|
3e8a1d23cf5f9a007094b34a984d5041631d2906
|
[
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_return_type_unchecked_if_not_defined_in_docstring"
] |
[
"tests/test_integrity_checker.py::IntegrityCheckerSphinxTestCase::test_missing_parameter",
"tests/test_integrity_checker.py::IntegrityCheckerSphinxTestCase::test_variable_doesnt_exist",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_arg_types_checked_if_in_both_docstring_and_function",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_bare_noqa",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_excess_parameter_added",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_excess_yield_added_to_errors",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_extra_raises_added_to_error",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_global_noqa",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_global_noqa_works_for_syntax_errors",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_incorrect_syntax_raises_exception_optionally",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_missing_parameter_added",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_missing_raises_added_to_error",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_missing_return_parameter_added",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_missing_yield_added_to_errors",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_after_excess_raises",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_excess_return",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_excess_parameters",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_excess_yield",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_missing_parameters",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_missing_raises",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_missing_yield",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_parameter_type_mismatch",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_parameter_type_mismatch_by_name",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_for_return_type_mismatch",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_noqa_missing_return_parameter_added",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_return_type_checked_if_defined_in_docstring_and_function",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_return_type_unchecked_if_not_defined_in_function",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_skips_functions_without_docstrings",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_throws_assertion_if_no_colon_in_parameter_line",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_throws_assertion_if_no_content_after_colon",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_try_block_no_excess_error",
"tests/test_integrity_checker.py::IntegrityCheckerTestCase::test_yields_from_added_to_error"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-06-03 17:04:36+00:00
|
mit
| 5,856 |
|
terryyin__lizard-120
|
diff --git a/lizard.py b/lizard.py
index 4d21e8a..cac8020 100755
--- a/lizard.py
+++ b/lizard.py
@@ -316,7 +316,7 @@ class NestingStack(object):
self.pending_function = None
self.nesting_stack.append(Namespace(token))
- def start_new_funciton_nesting(self, function):
+ def start_new_function_nesting(self, function):
self.pending_function = function
def _create_nesting(self):
@@ -386,7 +386,7 @@ class FileInfoBuilder(object):
self.fileinfo.filename,
self.current_line)
self.current_function.top_nesting_level = self.current_nesting_level
- self.start_new_funciton_nesting(self.current_function)
+ self.start_new_function_nesting(self.current_function)
def add_condition(self, inc=1):
self.current_function.cyclomatic_complexity += inc
diff --git a/lizard_ext/lizardns.py b/lizard_ext/lizardns.py
index e057e73..4ee09bf 100644
--- a/lizard_ext/lizardns.py
+++ b/lizard_ext/lizardns.py
@@ -1,13 +1,16 @@
"""
This extension counts nested control structures within a function.
-The extension is implemented with C++ in mind.
+
+The extension is implemented with C++ and Python in mind,
+but it is expected to work with other languages supported by Lizard
+with its language reader implementing 'nesting_level' metric for tokens.
The code borrows heavily from implementation of Nesting Depth extension
originally written by Mehrdad Meh and Terry Yin.
"""
-from lizard import FileInfoBuilder, FunctionInfo
-from lizard_ext.lizardnd import patch, patch_append_method
+from lizard import FunctionInfo
+from lizard_ext.lizardnd import patch_append_method
DEFAULT_NS_THRESHOLD = 3
@@ -32,106 +35,90 @@ class LizardExtension(object): # pylint: disable=R0903
def __call__(self, tokens, reader):
"""The intent of the code is to detect control structures as entities.
- The complexity arises from tracking of
- control structures without brackets.
- The termination of such control structures in C-like languages
- is the next statement or control structure with a compound statement.
-
- Moreover, control structures with two or more tokens complicates
- the proper counting, for example, 'else if'.
+ The implementation relies on nesting level metric for tokens
+ provided by language readers.
+ If the following contract for the nesting level metric does not hold,
+ this implementation of nested structure counting is invalid.
- In Python with meaningful indentation,
- tracking the indentation levels becomes crucial
- to identify boundaries of the structures.
- The following code is not designed for Python.
- """
- structures = set(['if', 'else', 'foreach', 'for', 'while', 'do',
- 'try', 'catch', 'switch'])
+ If a control structure has started its block (eg. '{'),
+ and its level is **less** than the next structure,
+ the next structure is nested.
- structure_indicator = "{"
- structure_end = "}"
- indent_indicator = ";"
-
- for token in tokens:
- if reader.context.is_within_structure():
- if token == "(":
- reader.context.add_parentheses(1)
- elif token == ")":
- reader.context.add_parentheses(-1)
+ If a control structure has *not* started its block,
+ and its level is **no more** than the next structure,
+ the next structure is nested (compound statement).
- if not reader.context.is_within_parentheses():
- if token in structures:
- reader.context.add_nested_structure(token)
+ If a control structure level is **higher** than the next structure,
+ it is considered closed.
- elif token == structure_indicator:
- reader.context.add_brace()
-
- elif token == structure_end:
- reader.context.pop_brace()
- reader.context.pop_nested_structure()
-
- elif token == indent_indicator:
- reader.context.pop_nested_structure()
-
- yield token
+ If a control structure has started its block,
+ and its level is **equal** to the next structure,
+ it is considered closed.
-
-# TODO: Some weird false positive from pylint. # pylint: disable=fixme
-# pylint: disable=E1101
-class NSFileInfoAddition(FileInfoBuilder):
-
- def add_nested_structure(self, token):
- """Conditionally adds nested structures."""
- # Handle compound else-if.
- if token == "if" and self.current_function.structure_stack:
- prev_token, br_state = self.current_function.structure_stack[-1]
- if (prev_token == "else" and
- br_state == self.current_function.brace_count):
+ The level of any non-structure tokens is treated
+ with the same logic as for the next structures
+ for control block **starting** and **closing** purposes.
+ """
+ # TODO: Delegate this to language readers # pylint: disable=fixme
+ structures = set(['if', 'else', 'elif', 'for', 'foreach', 'while', 'do',
+ 'try', 'catch', 'switch', 'finally', 'except',
+ 'with'])
+
+ cur_level = 0
+ start_structure = [False] # Just to make it mutable.
+ structure_stack = [] # [(token, ns_level)]
+
+ def add_nested_structure(token):
+ """Conditionally adds nested structures."""
+ if structure_stack:
+ prev_token, ns_level = structure_stack[-1]
+ if cur_level == ns_level:
+ if (token == "if" and prev_token == "else" and
+ not start_structure[0]):
+ return # Compound 'else if' in C-like languages.
+ if start_structure[0]:
+ structure_stack.pop()
+ elif cur_level < ns_level:
+ while structure_stack and ns_level >= cur_level:
+ _, ns_level = structure_stack.pop()
+
+ structure_stack.append((token, cur_level))
+ start_structure[0] = False # Starts on the next level with body.
+
+ ns_cur = len(structure_stack)
+ if reader.context.current_function.max_nested_structures < ns_cur:
+ reader.context.current_function.max_nested_structures = ns_cur
+
+ def pop_nested_structure():
+ """Conditionally pops the nested structures if levels match."""
+ if not structure_stack:
return
- self.current_function.structure_stack.append(
- (token, self.current_function.brace_count))
-
- ns_cur = len(self.current_function.structure_stack)
- if self.current_function.max_nested_structures < ns_cur:
- self.current_function.max_nested_structures = ns_cur
+ _, ns_level = structure_stack[-1]
- def pop_nested_structure(self):
- """Conditionally pops the structure count if braces match."""
- if not self.current_function.structure_stack:
- return
+ if cur_level > ns_level:
+ start_structure[0] = True
- _, br_state = self.current_function.structure_stack[-1]
- if br_state == self.current_function.brace_count:
- self.current_function.structure_stack.pop()
+ elif cur_level < ns_level:
+ while structure_stack and ns_level >= cur_level:
+ _, ns_level = structure_stack.pop()
+ start_structure[0] = bool(structure_stack)
- def add_brace(self):
- self.current_function.brace_count += 1
+ elif start_structure[0]:
+ structure_stack.pop()
- def pop_brace(self):
- # pylint: disable=fixme
- # TODO: For some reason, brace count goes negative.
- # assert self.current_function.brace_count > 0
- self.current_function.brace_count -= 1
-
- def add_parentheses(self, inc):
- """Dual purpose parentheses manipulator."""
- self.current_function.paren_count += inc
-
- def is_within_parentheses(self):
- assert self.current_function.paren_count >= 0
- return self.current_function.paren_count != 0
+ for token in tokens:
+ cur_level = reader.context.current_nesting_level
+ if token in structures:
+ add_nested_structure(token)
+ else:
+ pop_nested_structure()
- def is_within_structure(self):
- return bool(self.current_function.structure_stack)
+ yield token
def _init_nested_structure_data(self, *_):
self.max_nested_structures = 0
- self.brace_count = 0
- self.paren_count = 0
- self.structure_stack = []
-patch(NSFileInfoAddition, FileInfoBuilder)
patch_append_method(_init_nested_structure_data, FunctionInfo, "__init__")
|
terryyin/lizard
|
bdcc784bd22d8e48db22884dfeb42647ffb67fbf
|
diff --git a/test/testNestedStructures.py b/test/testNestedStructures.py
old mode 100755
new mode 100644
index 7eee514..1a2d826
--- a/test/testNestedStructures.py
+++ b/test/testNestedStructures.py
@@ -1,5 +1,7 @@
import unittest
-from .testHelpers import get_cpp_function_list_with_extnesion
+
+from .testHelpers import get_cpp_function_list_with_extnesion, \
+ get_python_function_list_with_extnesion
from lizard_ext.lizardns import LizardExtension as NestedStructure
@@ -7,6 +9,10 @@ def process_cpp(source):
return get_cpp_function_list_with_extnesion(source, NestedStructure())
+def process_python(source):
+ return get_python_function_list_with_extnesion(source, NestedStructure())
+
+
class TestCppNestedStructures(unittest.TestCase):
def test_no_structures(self):
@@ -209,3 +215,122 @@ class TestCppNestedStructures(unittest.TestCase):
}
""")
self.assertEqual(3, result[0].max_nested_structures)
+
+
+class TestPythonNestedStructures(unittest.TestCase):
+
+ def test_no_structures(self):
+ result = process_python("def fun():\n pass")
+ self.assertEqual(0, result[0].max_nested_structures)
+
+ def test_if_structure(self):
+ result = process_python("def fun():\n if a:\n return")
+ self.assertEqual(1, result[0].max_nested_structures)
+
+ def test_for_structure(self):
+ result = process_python("def fun():\n for a in b:\n foo()")
+ self.assertEqual(1, result[0].max_nested_structures)
+
+ def test_condition_in_if_structure(self):
+ result = process_python("def fun():\n if a and b:\n return")
+ self.assertEqual(1, result[0].max_nested_structures)
+
+ def test_elif(self):
+ result = process_python("""
+ def c():
+ if a:
+ baz()
+ elif c:
+ foo()
+ """)
+ self.assertEqual(1, result[0].max_nested_structures)
+
+ def test_nested_if_structures(self):
+ result = process_python("""
+ def c():
+ if a:
+ if b:
+ baz()
+ else:
+ foo()
+ """)
+ self.assertEqual(2, result[0].max_nested_structures)
+
+ def test_equal_metric_structures(self):
+ result = process_python("""
+ def c():
+ if a:
+ if b:
+ baz()
+ else:
+ foo()
+
+ for a in b:
+ if c:
+ bar()
+ """)
+ self.assertEqual(2, result[0].max_nested_structures)
+
+ def test_while(self):
+ result = process_python("""
+ def c():
+ while a:
+ baz()
+ """)
+ self.assertEqual(1, result[0].max_nested_structures)
+
+ def test_try_catch(self):
+ result = process_python("""
+ def c():
+ try:
+ f.open()
+ catch Exception as err:
+ print(err)
+ finally:
+ f.close()
+ """)
+ self.assertEqual(1, result[0].max_nested_structures)
+
+ def test_two_functions(self):
+ result = process_python("""
+ def c():
+ try:
+ if a:
+ foo()
+ catch Exception as err:
+ print(err)
+
+ def d():
+ for a in b:
+ for x in y:
+ if i:
+ return j
+ """)
+ self.assertEqual(2, result[0].max_nested_structures)
+ self.assertEqual(3, result[1].max_nested_structures)
+
+ def test_nested_functions(self):
+ result = process_python("""
+ def c():
+ def d():
+ for a in b:
+ for x in y:
+ if i:
+ return j
+ try:
+ if a:
+ foo()
+ catch Exception as err:
+ print(err)
+
+ """)
+ self.assertEqual(3, result[0].max_nested_structures)
+ self.assertEqual(2, result[1].max_nested_structures)
+
+ def test_with_structure(self):
+ result = process_python("""
+ def c():
+ with open(f) as input_file:
+ foo(f)
+ """)
+ self.assertEqual(1, result[0].max_nested_structures)
|
Detection of Deeply Nested Control Structures
This metric may not apply to the whole function,
but the maximum 'nestedness' (nested for-loops, if-statements, etc.)
may be an interesting metric for detecting code smells.
It closely relates to indentation.
Got this from the Linux kernel coding style:
> The answer to that is that if you need
more than 3 levels of indentation, you're screwed anyway, and should fix
your program.
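For illustration only (Lizard's real implementation consumes per-token nesting levels from its language readers), a hedged sketch that approximates the metric for Python source from indentation alone:
```python
# Illustrative sketch, not Lizard's implementation: approximate the
# maximum nesting of control structures from indentation alone.
STRUCTURES = ("if", "elif", "else", "for", "while",
              "try", "except", "finally", "with")

def max_nested_structures(source):
    stack = []    # indentation levels of the currently open structures
    deepest = 0
    for line in source.splitlines():
        stripped = line.strip()
        if not stripped:
            continue
        indent = len(line) - len(line.lstrip())
        while stack and indent <= stack[-1]:
            stack.pop()              # a dedent closes open structures
        if stripped.split()[0].rstrip(":") in STRUCTURES:
            stack.append(indent)
            deepest = max(deepest, len(stack))
    return deepest

print(max_nested_structures(
    "if a:\n    for x in y:\n        if b:\n            pass\n"))  # -> 3
```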
|
0.0
|
bdcc784bd22d8e48db22884dfeb42647ffb67fbf
|
[
"test/testNestedStructures.py::TestPythonNestedStructures::test_equal_metric_structures",
"test/testNestedStructures.py::TestPythonNestedStructures::test_nested_functions",
"test/testNestedStructures.py::TestPythonNestedStructures::test_nested_if_structures",
"test/testNestedStructures.py::TestPythonNestedStructures::test_try_catch",
"test/testNestedStructures.py::TestPythonNestedStructures::test_two_functions",
"test/testNestedStructures.py::TestPythonNestedStructures::test_with_structure"
] |
[
"test/testNestedStructures.py::TestCppNestedStructures::test_and_condition_in_if_structure",
"test/testNestedStructures.py::TestCppNestedStructures::test_do",
"test/testNestedStructures.py::TestCppNestedStructures::test_forever_loop",
"test/testNestedStructures.py::TestCppNestedStructures::test_if_structure",
"test/testNestedStructures.py::TestCppNestedStructures::test_nested_if_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_nested_loop_mixed_brackets",
"test/testNestedStructures.py::TestCppNestedStructures::test_no_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_non_r_value_ref_in_body",
"test/testNestedStructures.py::TestCppNestedStructures::test_scope",
"test/testNestedStructures.py::TestCppNestedStructures::test_switch_case",
"test/testNestedStructures.py::TestCppNestedStructures::test_terminator_in_parentheses",
"test/testNestedStructures.py::TestCppNestedStructures::test_ternary_operator",
"test/testNestedStructures.py::TestCppNestedStructures::test_try_catch",
"test/testNestedStructures.py::TestCppNestedStructures::test_while",
"test/testNestedStructures.py::TestPythonNestedStructures::test_condition_in_if_structure",
"test/testNestedStructures.py::TestPythonNestedStructures::test_elif",
"test/testNestedStructures.py::TestPythonNestedStructures::test_for_structure",
"test/testNestedStructures.py::TestPythonNestedStructures::test_if_structure",
"test/testNestedStructures.py::TestPythonNestedStructures::test_no_structures",
"test/testNestedStructures.py::TestPythonNestedStructures::test_while"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-05-08 06:41:31+00:00
|
mit
| 5,857 |
|
terryyin__lizard-144
|
diff --git a/lizard_languages/clike.py b/lizard_languages/clike.py
index 1134e96..a17fb03 100644
--- a/lizard_languages/clike.py
+++ b/lizard_languages/clike.py
@@ -235,7 +235,7 @@ class CLikeStates(CodeStateMachine):
self.context.add_to_long_function_name(token)
def _state_dec_to_imp(self, token):
- if token == 'const' or token == 'noexcept':
+ if token in ('const', 'noexcept', '&', '&&'):
self.context.add_to_long_function_name(" " + token)
elif token == 'throw':
self._state = self._state_throw
|
terryyin/lizard
|
1933addc0f0d4febb8b2273048f81556c0062d61
|
diff --git a/test/testCyclomaticComplexity.py b/test/testCyclomaticComplexity.py
index 346117e..d6efefa 100644
--- a/test/testCyclomaticComplexity.py
+++ b/test/testCyclomaticComplexity.py
@@ -79,3 +79,13 @@ class TestCppCyclomaticComplexity(unittest.TestCase):
""")
self.assertEqual(4, result[0].cyclomatic_complexity)
+ def test_ref_qualifiers(self):
+ """C++11 rvalue ref qualifiers look like AND operator."""
+ result = get_cpp_function_list(
+ "struct A { void foo() && { return bar() && baz(); } };")
+ self.assertEqual(1, len(result))
+ self.assertEqual(2, result[0].cyclomatic_complexity)
+ result = get_cpp_function_list(
+ "struct A { void foo() const && { return bar() && baz(); } };")
+ self.assertEqual(1, len(result))
+ self.assertEqual(2, result[0].cyclomatic_complexity)
diff --git a/test/test_languages/testCAndCPP.py b/test/test_languages/testCAndCPP.py
index 0928b15..b175fcd 100644
--- a/test/test_languages/testCAndCPP.py
+++ b/test/test_languages/testCAndCPP.py
@@ -423,6 +423,7 @@ class Test_c_cpp_lizard(unittest.TestCase):
result = get_cpp_function_list('''int fun(struct a){}''')
self.assertEqual(1, len(result))
+
def test_trailing_return_type(self):
"""C++11 trailing return type for functions."""
result = get_cpp_function_list("auto foo() -> void {}")
@@ -432,6 +433,21 @@ class Test_c_cpp_lizard(unittest.TestCase):
self.assertEqual(1, len(result))
self.assertEqual("foo", result[0].name)
+ def test_ref_qualifiers(self):
+ """C++11 ref qualifiers for member functions."""
+ result = get_cpp_function_list("struct A { void foo() & {} };")
+ self.assertEqual(1, len(result))
+ self.assertEqual("A::foo", result[0].name)
+ result = get_cpp_function_list("struct A { void foo() const & {} };")
+ self.assertEqual(1, len(result))
+ self.assertEqual("A::foo", result[0].name)
+ result = get_cpp_function_list("struct A { void foo() && {} };")
+ self.assertEqual(1, len(result))
+ self.assertEqual("A::foo", result[0].name)
+ result = get_cpp_function_list("struct A { void foo() const && {} };")
+ self.assertEqual(1, len(result))
+ self.assertEqual("A::foo", result[0].name)
+
class Test_Preprocessing(unittest.TestCase):
|
Bug: C++11 ref qualified functions
Lizard misses C++11 ref qualified member functions.
These functions don't appear in the report or the result database.
```cpp
struct A {
void foo() & {};
void foo() const & {};
void foo() && {};
void foo() const && {};
};
```
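The one-line fix treats `&` and `&&` after the parameter list as ref-qualifiers, on a par with `const` and `noexcept`. A hedged smoke test (assuming a lizard build that includes the fix) mirrors the added unit tests:
```python
# Hedged check: requires a lizard version containing this fix.
import lizard

source = "struct A { void foo() && { return bar() && baz(); } };"
fn = lizard.analyze_file.analyze_source_code("a.cpp", source).function_list[0]
print(fn.name, fn.cyclomatic_complexity)  # -> A::foo 2
```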
|
0.0
|
1933addc0f0d4febb8b2273048f81556c0062d61
|
[
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_ref_qualifiers",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_ref_qualifiers"
] |
[
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_and",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_else_if",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_forever_loop",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_no_condition",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_non_r_value_ref_in_body",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_one_condition",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_question_mark",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_r_value_ref_in_body",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_r_value_ref_in_parameter",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_statement_no_curly_brackets",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_one_function_with_typedef",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_sharp_if_and_sharp_elif_counts_in_cc_number",
"test/testCyclomaticComplexity.py::TestCppCyclomaticComplexity::test_two_function_with_non_r_value_ref_in_body",
"test/test_languages/testCAndCPP.py::Test_C_Token_extension::test_connecting_marcro",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_1",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_braket_that_is_not_a_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_class_with_inheritance",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_complicated_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initializer_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_uniform_initialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_destructor_implementation",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_nested_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_slash_within_string",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_empty",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_followed_with_one_word_is_ok",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_throw",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_declaration_is_not_counted",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_name_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_operator",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_that_returns_function_pointers",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_1_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_content",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_no_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param2",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_global_var_constructor",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_inline_operator",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_less_then_is_not_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_namespace_alias",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class_middle",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_unnamed_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_no_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_initializer_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_uniform_initialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_not_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function_has_semicolon",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_in_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_const",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_macro_in_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_only_word_can_be_function_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_shift",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_with_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_with_complicated_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_parentheses_before_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_pre_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_return_type",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_stupid_macro_before_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_part_of_function_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_full_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_partial_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_pointer",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference_as_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_trailing_return_type",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_simplest_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_typedef_is_not_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_underscore",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_content_macro_should_be_ignored",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessor_is_not_function",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessors_should_be_ignored_outside_function_implementation",
"test/test_languages/testCAndCPP.py::Test_Big::test_trouble"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2016-08-03 13:54:59+00:00
|
mit
| 5,858 |
|
terryyin__lizard-174
|
diff --git a/lizard_ext/lizardns.py b/lizard_ext/lizardns.py
index fe24dfc..3330550 100644
--- a/lizard_ext/lizardns.py
+++ b/lizard_ext/lizardns.py
@@ -39,7 +39,7 @@ class LizardExtension(object): # pylint: disable=R0903
If the following contract for the nesting level metric does not hold,
this implementation of nested structure counting is invalid.
- If a control structure has started its block (eg. '{'),
+ If a control structure has started its block (e.g., '{'),
and its level is **less** than the next structure,
the next structure is nested.
@@ -107,14 +107,13 @@ class LizardExtension(object): # pylint: disable=R0903
structure_stack.pop()
for token in tokens:
+ yield token
cur_level = reader.context.current_nesting_level
if token in structures:
add_nested_structure(token)
else:
pop_nested_structure()
- yield token
-
def _init_nested_structure_data(self, *_):
self.max_nested_structures = 0
diff --git a/lizard_languages/clike.py b/lizard_languages/clike.py
index 20b683f..bf97d97 100644
--- a/lizard_languages/clike.py
+++ b/lizard_languages/clike.py
@@ -88,50 +88,69 @@ class CLikeNestingStackStates(CodeStateMachine):
The handling of these complex cases is unspecified and can be ignored.
"""
- # Beasts that can be defined within one line without braces.
- __braceless_structures = set(['if', 'else', 'for', 'while', 'do',
- 'switch'])
- __paren_count = 0 # Used only to tackle the beasts.
- __braceless = None # Applies only to the beasts.
- __structure_brace_stack = [] # Boolean stack for structures' brace states.
-
- def __pop_braceless_structures(self):
- """Pops structures up to the one with braces."""
+ __structures = set(["if", "else", "for", "while", "do", "switch",
+ "try", "catch"])
+ # Structures paired on the same nesting level.
+ __paired_structures = {"if": "else", "try": "catch", "catch": "catch",
+ "do": "while"}
+ __wait_for_pair = False # Wait for the pair structure to close the level.
+ __structure_brace_stack = [] # Structure and brace states.
+
+ def __pop_without_pair(self):
+ """Continue poping nesting levels without the pair."""
+ self.__wait_for_pair = False
+ while (self.__structure_brace_stack and
+ self.__structure_brace_stack[-1]):
+ structure = self.__structure_brace_stack.pop()
+ self.context.pop_nesting()
+ if structure in self.__paired_structures:
+ self.__wait_for_pair = self.__paired_structures[structure]
+ return
+
+ def __pop_structures(self):
+ """Pops structures up to the one with braces or a waiting pair."""
self.context.pop_nesting()
- is_structure = None
+ structure = None
if self.__structure_brace_stack:
- is_structure = self.__structure_brace_stack.pop()
+ structure = self.__structure_brace_stack.pop()
- while (is_structure is not None and self.__structure_brace_stack and
- self.__structure_brace_stack[-1]):
- self.__structure_brace_stack.pop()
- self.context.pop_nesting()
+ if structure is None:
+ return
+ if structure in self.__paired_structures:
+ self.__wait_for_pair = self.__paired_structures[structure]
+ return
+ self.__pop_without_pair()
def __else_if_structure(self, token):
"""Handles possible compound 'else if' after 'else' token."""
self._state = self.__declare_structure
- if token != "if":
+ if token == "if":
+ self.__structure_brace_stack[-1] = "if"
+ else:
self._state(token)
+ @CodeStateMachine.read_inside_brackets_then("()")
def __declare_structure(self, token):
"""Ignores structures between parentheses on structure declaration."""
- if token == "(":
- self.__paren_count += 1
- elif token == ")":
- # assert self.__paren_count > 0
- self.__paren_count -= 1
- elif self.__paren_count == 0:
- self._state = self._state_global
- if token == "{":
- self.__braceless = False
- else:
- self.__braceless = True
- self.context.add_bare_nesting()
- self.__structure_brace_stack.append(True)
+ self.context.add_bare_nesting()
+ self._state = self._state_structure
+ if token != ")":
+ self._state(token)
+
+ def _state_structure(self, token):
+ """Control-flow structure states right before the body."""
+ self._state = self._state_global
+ if token == "{":
+ self.context.add_bare_nesting()
+ self.__structure_brace_stack.append(False)
+ else:
self._state(token)
def _state_global(self, token):
"""Dual-purpose state for global and structure bodies."""
+ while self.__wait_for_pair and token != self.__wait_for_pair:
+ self.__pop_without_pair()
+
if token == "template":
self._state = self._template_declaration
@@ -140,16 +159,15 @@ class CLikeNestingStackStates(CodeStateMachine):
elif token == "{":
self.context.add_bare_nesting()
- self.__structure_brace_stack.append(self.__braceless)
- self.__braceless = None
+ self.__structure_brace_stack.append(None) # Non-structure braces.
elif token == '}' or (token == ";" and self.__structure_brace_stack and
self.__structure_brace_stack[-1]):
- self.__braceless = None
- self.__pop_braceless_structures()
+ self.__pop_structures()
- elif token in self.__braceless_structures:
- # assert self.__paren_count == 0
+ elif token in self.__structures:
+ self.__wait_for_pair = False
+ self.__structure_brace_stack.append(token)
if token == "else":
self._state = self.__else_if_structure
else:
|
terryyin/lizard
|
71478c51b2d16688efd489ae41e16f21c89df0ca
|
diff --git a/test/testNestedStructures.py b/test/testNestedStructures.py
index 2f77547..5e565fb 100644
--- a/test/testNestedStructures.py
+++ b/test/testNestedStructures.py
@@ -175,6 +175,32 @@ class TestCppNestedStructures(unittest.TestCase):
self.assertEqual(2, result[0].max_nested_structures)
self.assertEqual(2, result[1].max_nested_structures)
+ def test_braceless_nested_if_try_structures(self):
+ result = process_cpp("""
+ x c() {
+ if (a)
+ try {
+ throw 42;
+ } catch(...) {
+ if (b) return 42;
+ }
+ }
+ """)
+ self.assertEqual(3, result[0].max_nested_structures)
+
+ def test_braceless_nested_for_try_structures(self):
+ result = process_cpp("""
+ x c() {
+ for (;;)
+ try {
+ throw 42;
+ } catch(...) {
+ if (b) return 42;
+ }
+ }
+ """)
+ self.assertEqual(3, result[0].max_nested_structures)
+
def test_switch_case(self):
"""Switch-Case is one control structure."""
result = process_cpp("""
@@ -228,6 +254,74 @@ class TestCppNestedStructures(unittest.TestCase):
""")
self.assertEqual(3, result[0].max_nested_structures)
+ def test_braceless_consecutive_if_structures(self):
+ """Braceless structures one after another."""
+ result = process_cpp("""
+ x c() {
+ if (a)
+ if (b)
+ foobar();
+ if (c)
+ if (d)
+ baz();
+ }
+ """)
+ self.assertEqual(2, result[0].max_nested_structures)
+
+ def test_braceless_consecutive_for_if_structures(self):
+ """Braceless structures one after another."""
+ result = process_cpp("""
+ x c() {
+ for (;;)
+ for (;;)
+ foobar();
+ if (c)
+ if (d)
+ baz();
+ }
+ """)
+ self.assertEqual(2, result[0].max_nested_structures)
+
+ def test_braceless_consecutive_if_structures_with_return(self):
+ """Braceless structures one after another."""
+ result = process_cpp("""
+ x c() {
+ if (a)
+ if (b)
+ return true;
+ if (c)
+ if (d)
+ return false;
+ }
+ """)
+ self.assertEqual(2, result[0].max_nested_structures)
+
+ def test_braceless_nested_if_else_structures(self):
+ result = process_cpp("""
+ x c() {
+ if (a)
+ if (b) {
+ return b;
+ } else {
+ if (b) return 42;
+ }
+ }
+ """)
+ self.assertEqual(3, result[0].max_nested_structures)
+
+ def test_braceless_nested_if_else_if_structures(self):
+ result = process_cpp("""
+ x c() {
+ if (a)
+ if (b) {
+ return b;
+ } else if (c) {
+ if (b) return 42;
+ }
+ }
+ """)
+ self.assertEqual(3, result[0].max_nested_structures)
+
@unittest.skip("Unspecified. Not Implemented. Convoluted.")
def test_struct_inside_declaration(self):
"""Extra complexity class/struct should be ignored."""
|
A non-structural failure case in the NS metric
Hi,
I am currently running the NS metric on my repository and hit a bug involving return statements.
The following unit test demonstrates the problem:
```python
def test_non_structure_braces_with_return(self):
"""return statements in non-structural nesting level may confuse the nesting level."""
result = process_cpp("""
x c() {
if (a)
if (b)
return false;
if (c)
if (d)
return false;
}
""")
    self.assertEqual(3, result[0].max_nested_structures)  # wrong: should be 2
```
commit: 23ec9e8e0091bf24a13d30b72fbe4df5b77b971a
Looking forward to hearing from you regarding this issue.
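The patch resolves this by pairing structures: when a braceless structure is popped, the parser remembers the token that may continue it (`if`/`else`, `try`/`catch`, `do`/`while`), and only that pair keeps the nesting level open. A hedged, simplified sketch of the pairing rule:
```python
# Hedged, simplified sketch of the pairing rule from the patch.
PAIRED = {"if": "else", "try": "catch", "catch": "catch", "do": "while"}

def on_structure_end(stack, next_token):
    """Pop the innermost structure; keep its level only for its pair."""
    token = stack.pop()
    if PAIRED.get(token) == next_token:
        stack.append(next_token)   # e.g. `else` continues the `if` level
        return "level kept"
    return "level closed"

print(on_structure_end(["if"], "else"))  # -> level kept
print(on_structure_end(["if"], "if"))    # -> level closed (a fresh `if` is not nested)
```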
|
0.0
|
71478c51b2d16688efd489ae41e16f21c89df0ca
|
[
"test/testNestedStructures.py::TestCppNestedStructures::test_braceless_consecutive_for_if_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_braceless_consecutive_if_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_braceless_consecutive_if_structures_with_return",
"test/testNestedStructures.py::TestCppNestedStructures::test_braceless_nested_for_try_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_braceless_nested_if_try_structures"
] |
[
"test/testNestedStructures.py::TestCppNestedStructures::test_and_condition_in_if_structure",
"test/testNestedStructures.py::TestCppNestedStructures::test_braceless_nested_if_else_if_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_braceless_nested_if_else_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_do",
"test/testNestedStructures.py::TestCppNestedStructures::test_else_if",
"test/testNestedStructures.py::TestCppNestedStructures::test_equal_metric_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_forever_loop",
"test/testNestedStructures.py::TestCppNestedStructures::test_gotcha_if_else",
"test/testNestedStructures.py::TestCppNestedStructures::test_if_structure",
"test/testNestedStructures.py::TestCppNestedStructures::test_nested_if_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_nested_loop_mixed_brackets",
"test/testNestedStructures.py::TestCppNestedStructures::test_no_structures",
"test/testNestedStructures.py::TestCppNestedStructures::test_non_r_value_ref_in_body",
"test/testNestedStructures.py::TestCppNestedStructures::test_non_structure_braces",
"test/testNestedStructures.py::TestCppNestedStructures::test_scope",
"test/testNestedStructures.py::TestCppNestedStructures::test_switch_case",
"test/testNestedStructures.py::TestCppNestedStructures::test_terminator_in_parentheses",
"test/testNestedStructures.py::TestCppNestedStructures::test_ternary_operator",
"test/testNestedStructures.py::TestCppNestedStructures::test_try_catch",
"test/testNestedStructures.py::TestCppNestedStructures::test_while",
"test/testNestedStructures.py::TestPythonNestedStructures::test_condition_in_if_structure",
"test/testNestedStructures.py::TestPythonNestedStructures::test_elif",
"test/testNestedStructures.py::TestPythonNestedStructures::test_equal_metric_structures",
"test/testNestedStructures.py::TestPythonNestedStructures::test_for_else",
"test/testNestedStructures.py::TestPythonNestedStructures::test_for_structure",
"test/testNestedStructures.py::TestPythonNestedStructures::test_if_structure",
"test/testNestedStructures.py::TestPythonNestedStructures::test_nested_functions",
"test/testNestedStructures.py::TestPythonNestedStructures::test_nested_if_structures",
"test/testNestedStructures.py::TestPythonNestedStructures::test_no_structures",
"test/testNestedStructures.py::TestPythonNestedStructures::test_try_catch",
"test/testNestedStructures.py::TestPythonNestedStructures::test_two_functions",
"test/testNestedStructures.py::TestPythonNestedStructures::test_while",
"test/testNestedStructures.py::TestPythonNestedStructures::test_while_else",
"test/testNestedStructures.py::TestPythonNestedStructures::test_with_structure"
] |
{
"failed_lite_validators": [
"has_git_commit_hash",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-12-30 21:41:57+00:00
|
mit
| 5,859 |
|
terryyin__lizard-191
|
diff --git a/lizard_languages/clike.py b/lizard_languages/clike.py
index 33e1c3d..2c1af01 100644
--- a/lizard_languages/clike.py
+++ b/lizard_languages/clike.py
@@ -155,7 +155,7 @@ class CLikeNestingStackStates(CodeStateMachine):
if token == "template":
self._state = self._template_declaration
- elif token in ("struct", "class", "namespace"):
+ elif token in ("struct", "class", "namespace", "union"):
self._state = self._read_namespace
elif token == "{":
|
terryyin/lizard
|
48de756b52b92705f2127353b54d5a4ddac71187
|
diff --git a/test/test_languages/testCAndCPP.py b/test/test_languages/testCAndCPP.py
index cd5569b..41a1b13 100644
--- a/test/test_languages/testCAndCPP.py
+++ b/test/test_languages/testCAndCPP.py
@@ -461,6 +461,11 @@ class Test_c_cpp_lizard(unittest.TestCase):
self.assertEqual(1, len(result))
self.assertEqual("A::foo", result[0].name)
+ def test_union_as_qualifier(self):
+ """Union as namespace for functions."""
+ result = get_cpp_function_list("union A { void foo() {} };")
+ self.assertEqual(1, len(result))
+ self.assertEqual("A::foo", result[0].name)
class Test_cpp11_Attributes(unittest.TestCase):
"""C++11 extendable attributes can appear pretty much anywhere."""
|
Lizard not handling functions within unions correctly
For the following code:
```c++
namespace foo{
void myFunction() { }
union bar{
void mySecondFunction() { }
};
class dog{
void bark() { }
};
};
```
Lizard generates the following output:

This shows that the long_name generated by lizard includes the namespace or class that contains a function, but not the union that contains it. This inconsistency can cause issues for code analysis tools.
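With `union` added to the set of namespace-like qualifiers, a hedged check (assuming a lizard build that includes the fix) mirrors the new test:
```python
# Hedged check: requires a lizard version containing this fix.
import lizard

result = lizard.analyze_file.analyze_source_code(
    "a.cpp", "union A { void foo() {} };")
print([f.name for f in result.function_list])  # -> ['A::foo']
```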
|
0.0
|
48de756b52b92705f2127353b54d5a4ddac71187
|
[
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_union_as_qualifier"
] |
[
"test/test_languages/testCAndCPP.py::Test_C_Token_extension::test_connecting_marcro",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_1",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_braket_that_is_not_a_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_class_with_inheritance",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_complicated_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initializer_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_uniform_initialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_destructor_implementation",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_nested_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_slash_within_string",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_empty",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_followed_with_one_word_is_ok",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_throw",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_declaration_is_not_counted",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_name_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_operator",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_that_returns_function_pointers",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_1_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_content",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_no_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param2",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_global_var_constructor",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_inline_operator",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_less_then_is_not_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_namespace_alias",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class_middle",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_unnamed_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_no_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_initializer_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_uniform_initialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_not_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function_has_semicolon",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_in_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_const",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_throw",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_macro_in_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_only_word_can_be_function_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_shift",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_with_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_with_complicated_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_parentheses_before_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_pre_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_ref_qualifiers",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_return_type",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_stupid_macro_before_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_part_of_function_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_full_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_partial_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_pointer",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference_as_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_trailing_return_type",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_simplest_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_typedef_is_not_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_underscore",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_class",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_control_structures",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function_parameters",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function_return_type",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_namespace",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_content_macro_should_be_ignored",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessor_is_not_function",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessors_should_be_ignored_outside_function_implementation",
"test/test_languages/testCAndCPP.py::Test_Big::test_trouble",
"test/test_languages/testCAndCPP.py::Test_Dialects::test_cuda_kernel_launch"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-06-29 21:53:30+00:00
|
mit
| 5,860 |
|
terryyin__lizard-328
|
diff --git a/lizard_languages/__init__.py b/lizard_languages/__init__.py
index 94b9052..785a09d 100644
--- a/lizard_languages/__init__.py
+++ b/lizard_languages/__init__.py
@@ -3,6 +3,7 @@
from .clike import CLikeReader
from .java import JavaReader
from .javascript import JavaScriptReader
+from .kotlin import KotlinReader
from .python import PythonReader
from .objc import ObjCReader
from .ttcn import TTCNReader
@@ -37,7 +38,8 @@ def languages():
LuaReader,
RustReader,
TypeScriptReader,
- FortranReader
+ FortranReader,
+ KotlinReader
]
diff --git a/lizard_languages/kotlin.py b/lizard_languages/kotlin.py
new file mode 100644
index 0000000..f5c7b1b
--- /dev/null
+++ b/lizard_languages/kotlin.py
@@ -0,0 +1,92 @@
+'''
+Language parser for Kotlin
+'''
+
+from .clike import CCppCommentsMixin
+from .code_reader import CodeReader, CodeStateMachine
+from .golike import GoLikeStates
+from .swift import SwiftReplaceLabel
+
+
+class KotlinReader(CodeReader, CCppCommentsMixin, SwiftReplaceLabel):
+ # pylint: disable=R0903
+
+ ext = ['kt', 'kts']
+ language_names = ['kotlin']
+ _conditions = {
+ 'if', 'for', 'while', 'catch', '&&', '||', '?:'
+ }
+
+ def __init__(self, context):
+ super(KotlinReader, self).__init__(context)
+ self.parallel_states = [KotlinStates(context)]
+
+ @staticmethod
+ def generate_tokens(source_code, addition='', token_class=None):
+ return CodeReader.generate_tokens(
+ source_code,
+ r"|`\w+`" +
+ r"|\w+\?" +
+ r"|\w+\!!" +
+ r"|\?\?" +
+ r"|\?:" +
+ addition
+ )
+
+
+class KotlinStates(GoLikeStates): # pylint: disable=R0903
+
+ FUNC_KEYWORD = 'fun'
+
+ def __init__(self, context, in_when_cases=False):
+ super().__init__(context)
+ self._in_when_cases = in_when_cases
+
+ def _state_global(self, token):
+ if token in ('get', 'set'):
+ self.context.push_new_function(token)
+ self._state = self._expect_function_impl
+ elif token == '->':
+ if self._in_when_cases:
+ self.context.add_condition()
+ else:
+ self.context.push_new_function("(anonymous)")
+ self._state = super(KotlinStates, self)._expect_function_impl
+ elif token in ('val', 'var', ','):
+ self._state = self._expect_declaration_name
+ elif token == 'interface':
+ self._state = self._interface
+ elif token == 'when':
+ self._state = self._when_cases
+ else:
+ super(KotlinStates, self)._state_global(token)
+
+ def _expect_declaration_name(self, token):
+ self._state = self._state_global
+
+ def _expect_function_impl(self, token):
+ if token == '{' or token == '=':
+ self.next(self._function_impl, token)
+
+ @CodeStateMachine.read_inside_brackets_then("{}")
+ def _interface(self, end_token):
+ if end_token == "}":
+ self._state = self._state_global
+
+ def _function_name(self, token):
+ if token == "<":
+ self.next(self._template, token)
+ else:
+ return super(KotlinStates, self)._function_name(token)
+
+ @CodeStateMachine.read_inside_brackets_then("<>", "_function_name")
+ def _template(self, tokens):
+ pass
+
+ def _when_cases(self, token):
+ def callback():
+ self.context.add_condition(inc=-1)
+ self.next(self._state_global)
+ if token != '{':
+ return
+ self.sub_state(KotlinStates(self.context, in_when_cases=True), callback)
diff --git a/lizard_languages/swift.py b/lizard_languages/swift.py
index dc4eed2..3b1cbcc 100644
--- a/lizard_languages/swift.py
+++ b/lizard_languages/swift.py
@@ -7,7 +7,24 @@ from .clike import CCppCommentsMixin
from .golike import GoLikeStates
-class SwiftReader(CodeReader, CCppCommentsMixin):
+class SwiftReplaceLabel:
+ def preprocess(self, tokens):
+ tokens = list(t for t in tokens if not t.isspace() or t == '\n')
+
+ def replace_label(tokens, target, replace):
+ for i in range(0, len(tokens) - len(target)):
+ if tokens[i:i + len(target)] == target:
+ for j, repl in enumerate(replace):
+ tokens[i + j] = repl
+ return tokens
+
+ for k in (k for k in self.conditions if k.isalpha()):
+ tokens = replace_label(tokens, ["(", k, ":"], ["(", "_" + k, ":"])
+ tokens = replace_label(tokens, [",", k, ":"], [",", "_" + k, ":"])
+ return tokens
+
+
+class SwiftReader(CodeReader, CCppCommentsMixin, SwiftReplaceLabel):
# pylint: disable=R0903
FUNC_KEYWORD = 'def'
@@ -30,20 +47,6 @@ class SwiftReader(CodeReader, CCppCommentsMixin):
r"|\?\?" +
addition)
- def preprocess(self, tokens):
- tokens = list(t for t in tokens if not t.isspace() or t == '\n')
-
- def replace_label(tokens, target, replace):
- for i in range(0, len(tokens) - len(target)):
- if tokens[i:i + len(target)] == target:
- for j, repl in enumerate(replace):
- tokens[i + j] = repl
- return tokens
- for k in (k for k in self.conditions if k.isalpha()):
- tokens = replace_label(tokens, ["(", k, ":"], ["(", "_" + k, ":"])
- tokens = replace_label(tokens, [",", k, ":"], [",", "_" + k, ":"])
- return tokens
-
class SwiftStates(GoLikeStates): # pylint: disable=R0903
def _state_global(self, token):
|
terryyin/lizard
|
b93998d08b9cd08c8de91a65964f7a882e8883c9
|
diff --git a/test/test_languages/testKotlin.py b/test/test_languages/testKotlin.py
new file mode 100644
index 0000000..a9e94bd
--- /dev/null
+++ b/test/test_languages/testKotlin.py
@@ -0,0 +1,226 @@
+import unittest
+
+from lizard import analyze_file
+from lizard_languages import KotlinReader
+
+
+def get_kotlin_function_list(source_code):
+ return analyze_file.analyze_source_code(
+ "a.kt", source_code
+ ).function_list
+
+
+class Test_tokenizing_Kotlin(unittest.TestCase):
+
+ def check_tokens(self, expect, source):
+ tokens = list(KotlinReader.generate_tokens(source))
+ self.assertEqual(expect, tokens)
+
+ def test_dollar_var(self):
+ self.check_tokens(['`a`'], '`a`')
+
+
+class Test_parser_for_Kotlin(unittest.TestCase):
+
+ def test_empty(self):
+ functions = get_kotlin_function_list("")
+ self.assertEqual(0, len(functions))
+
+ def test_no_function(self):
+ result = get_kotlin_function_list('''
+ for name in names {
+ println("Hello, \\(name)!")
+ }
+ ''')
+ self.assertEqual(0, len(result))
+
+ def test_one_function(self):
+ result = get_kotlin_function_list('''
+ fun sayGoodbye() { }
+ ''')
+ self.assertEqual(1, len(result))
+ self.assertEqual("sayGoodbye", result[0].name)
+ self.assertEqual(0, result[0].parameter_count)
+ self.assertEqual(1, result[0].cyclomatic_complexity)
+
+ def test_one_with_parameter(self):
+ result = get_kotlin_function_list('''
+ fun sayGoodbye(personName: String, alreadyGreeted: Bool) { }
+ ''')
+ self.assertEqual(1, len(result))
+ self.assertEqual("sayGoodbye", result[0].name)
+ self.assertEqual(2, result[0].parameter_count)
+
+ def test_one_function_with_return_value(self):
+ result = get_kotlin_function_list('''
+ fun sayGoodbye(): String {return "bye"}
+ ''')
+ self.assertEqual(1, len(result))
+ self.assertEqual("sayGoodbye", result[0].name)
+
+ def test_one_lambda_with_return_value(self):
+ result = get_kotlin_function_list('''
+ val sayGoodbye: () -> String = {"bye"}
+ ''')
+ self.assertEqual(1, len(result))
+ self.assertEqual("(anonymous)", result[0].name)
+
+ def test_one_function_with_complexity(self):
+ result = get_kotlin_function_list('''
+ fun sayGoodbye() { if ++diceRoll == 7 { diceRoll = 1 }}
+ ''')
+ self.assertEqual(2, result[0].cyclomatic_complexity)
+
+ def test_interface(self):
+ result = get_kotlin_function_list('''
+ interface p {
+ fun f1(): String
+ fun f2()
+ }
+ fun sayGoodbye() { }
+ ''')
+ self.assertEqual(1, len(result))
+ self.assertEqual("sayGoodbye", result[0].name)
+
+ def test_interface_followed_by_a_class(self):
+ result = get_kotlin_function_list('''
+ interface p {
+ fun f1(): String
+ fun f2()
+ }
+ class c { }
+ ''')
+ self.assertEqual(0, len(result))
+
+ def test_interface_with_vars(self):
+ result = get_kotlin_function_list('''
+ interface p {
+ fun f1(): String
+ fun f2()
+ val p1: String
+ val p2: String
+ get() = "p2"
+ }
+ class c { }
+ ''')
+ self.assertEqual(0, len(result))
+
+ def test_getter(self):
+ result = get_kotlin_function_list('''
+ class Time
+ {
+ var seconds: Double = 17.0
+ var minutes: Double
+ get() = seconds / 60
+ }
+ ''')
+ self.assertEqual("get", result[0].name)
+
+ def test_getter_setter(self):
+ result = get_kotlin_function_list('''
+ class Time
+ {
+ var seconds: Double = 17.0
+ var minutes: Double
+ get() = seconds / 60
+ set(newValue) {
+ this.seconds = (newValue * 60)
+ }
+ }
+ ''')
+ self.assertEqual("get", result[1].name)
+ self.assertEqual("set", result[0].name)
+
+ # https://docs.kotlin.org/kotlin-book/LanguageGuide/Properties.html#ID259
+ def test_explicit_getter_setter(self):
+ result = get_kotlin_function_list('''
+ var center: Point
+ get() = {
+ val centerX = origin.x + (size.width / 2)
+ val centerY = origin.y + (size.height / 2)
+ return Point(x: centerX, y: centerY)
+ }
+ set(newCenter) {
+ origin.x = newCenter.x - (size.width / 2)
+ origin.y = newCenter.y - (size.height / 2)
+ }
+ }
+ ''')
+ self.assertEqual("set", result[0].name)
+ self.assertEqual("get", result[1].name)
+
+ def test_when_cases(self):
+ result = get_kotlin_function_list('''
+ fun cases(x: Int) {
+ when (x) {
+ 0, 1 -> print("x == 0 or x == 1")
+ else -> print("otherwise")
+ }
+ }
+ ''')
+ self.assertEqual("cases", result[0].name)
+ self.assertEqual(2, result[0].cyclomatic_complexity)
+
+ def test_keyword_declarations(self):
+ result = get_kotlin_function_list('''
+ enum class Func {
+ static var `class`: Bool? = false
+ static val `interface` = 0
+ fun `get`() {}
+ }
+ ''')
+ self.assertEqual("`get`", result[0].name)
+
+ def test_generic_function(self):
+ result = get_kotlin_function_list('''
+ fun <T> f() {}
+ ''')
+ self.assertEqual("f", result[0].name)
+
+ def test_complex_generic_function(self):
+ result = get_kotlin_function_list('''
+ fun <C1, C2> f (c1: C1, c: C2): Boolean where C2 : Container {return C2.isEmpty()}
+ ''')
+ self.assertEqual("f", result[0].name)
+ self.assertEqual(2, result[0].parameter_count)
+
+ def test_elvis_operator(self):
+ result = get_kotlin_function_list(''' fun f() {
+ val keep = filteredList?.contains(ingredient) ?: true
+ }
+ ''')
+ self.assertEqual("f", result[0].name)
+ self.assertEqual(2, result[0].cyclomatic_complexity)
+
+ def test_for_label(self):
+ result = get_kotlin_function_list('''
+ fun f0() { something(for: .something) }
+ fun f1() { something(for :.something) }
+ fun f2() { something(for : .something) }
+ fun f3() { something(for: if (isValid) true else false) }
+ fun f4() { something(label1: .something, label2: .something, for: .something) }
+ ''')
+ self.assertEqual(1, result[0].cyclomatic_complexity)
+ self.assertEqual(1, result[1].cyclomatic_complexity)
+ self.assertEqual(1, result[2].cyclomatic_complexity)
+ self.assertEqual(2, result[3].cyclomatic_complexity)
+ self.assertEqual(1, result[4].cyclomatic_complexity)
+
+ def test_nested(self):
+ result = get_kotlin_function_list('''
+ fun bar() : Int {
+ fun a() : Int {
+ // Do a load of stuff
+ return 1
+ }
+ fun b() : Int {
+ // Do a load of stuff
+ return 1
+ }
+ return a() + b()
+ }
+ ''')
+ self.assertEqual(3, len(result))
+ self.assertEqual("a", result[0].name)
+ self.assertEqual("b", result[1].name)
+ self.assertEqual("bar", result[2].name)
|
Kotlin support for lizard?
Hi. Any plans for lizard to support Kotlin?
|
0.0
|
b93998d08b9cd08c8de91a65964f7a882e8883c9
|
[
"test/test_languages/testKotlin.py::Test_tokenizing_Kotlin::test_dollar_var",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_complex_generic_function",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_elvis_operator",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_empty",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_explicit_getter_setter",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_for_label",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_generic_function",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_getter",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_getter_setter",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_interface",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_interface_followed_by_a_class",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_interface_with_vars",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_keyword_declarations",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_nested",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_no_function",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_function",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_function_with_complexity",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_function_with_return_value",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_lambda_with_return_value",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_one_with_parameter",
"test/test_languages/testKotlin.py::Test_parser_for_Kotlin::test_when_cases"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-06 10:23:16+00:00
|
mit
| 5,861 |
|
testing-cabal__fixtures-58
|
diff --git a/NEWS b/NEWS
index af2c00c..1439d53 100644
--- a/NEWS
+++ b/NEWS
@@ -7,6 +7,7 @@ NEXT
* Dropped support for Python 2.7, Python 3.4 and Python 3.5 (EOL).
* Added support for Python 3.7-3.10.
+* Support all ``subprocess.Popen`` arguments up to Python 3.10.
3.0.0
~~~~~
diff --git a/fixtures/_fixtures/popen.py b/fixtures/_fixtures/popen.py
index c35ed5e..ffa9bf4 100644
--- a/fixtures/_fixtures/popen.py
+++ b/fixtures/_fixtures/popen.py
@@ -20,6 +20,7 @@ __all__ = [
import random
import subprocess
+import sys
from fixtures import Fixture
@@ -126,13 +127,38 @@ class FakePopen(Fixture):
stdin=_unpassed, stdout=_unpassed, stderr=_unpassed,
preexec_fn=_unpassed, close_fds=_unpassed, shell=_unpassed,
cwd=_unpassed, env=_unpassed, universal_newlines=_unpassed,
- startupinfo=_unpassed, creationflags=_unpassed):
+ startupinfo=_unpassed, creationflags=_unpassed,
+ restore_signals=_unpassed, start_new_session=_unpassed,
+ pass_fds=_unpassed, *, group=_unpassed, extra_groups=_unpassed,
+ user=_unpassed, umask=_unpassed, encoding=_unpassed,
+ errors=_unpassed, text=_unpassed, pipesize=_unpassed):
+ # Reject arguments introduced by newer versions of Python in older
+ # versions; this makes it harder to accidentally hide compatibility
+ # problems using test doubles.
+ if sys.version_info < (3, 7) and text is not FakePopen._unpassed:
+ raise TypeError(
+ "FakePopen.__call__() got an unexpected keyword argument "
+ "'text'")
+ if sys.version_info < (3, 9):
+ for arg_name in "group", "extra_groups", "user", "umask":
+ if locals()[arg_name] is not FakePopen._unpassed:
+ raise TypeError(
+ "FakePopen.__call__() got an unexpected keyword "
+ "argument '{}'".format(arg_name))
+ if sys.version_info < (3, 10) and pipesize is not FakePopen._unpassed:
+ raise TypeError(
+ "FakePopen.__call__() got an unexpected keyword argument "
+ "'pipesize'")
+
proc_args = dict(args=args)
local = locals()
for param in [
"bufsize", "executable", "stdin", "stdout", "stderr",
"preexec_fn", "close_fds", "shell", "cwd", "env",
- "universal_newlines", "startupinfo", "creationflags"]:
+ "universal_newlines", "startupinfo", "creationflags",
+ "restore_signals", "start_new_session", "pass_fds", "group",
+ "extra_groups", "user", "umask", "encoding", "errors", "text",
+ "pipesize"]:
if local[param] is not FakePopen._unpassed:
proc_args[param] = local[param]
proc_info = self.get_info(proc_args)
|
testing-cabal/fixtures
|
7aa50f2059dd09cc4321462e5e24310d223c3350
|
diff --git a/fixtures/tests/_fixtures/test_popen.py b/fixtures/tests/_fixtures/test_popen.py
index b0af3d3..cafd98e 100644
--- a/fixtures/tests/_fixtures/test_popen.py
+++ b/fixtures/tests/_fixtures/test_popen.py
@@ -15,6 +15,7 @@
import io
import subprocess
+import sys
import testtools
@@ -48,19 +49,59 @@ class TestFakePopen(testtools.TestCase, TestWithFixtures):
proc = fixture(['foo'])
self.assertEqual('stdout', proc.stdout)
- def test_handles_all_2_7_args(self):
+ def test_handles_all_Popen_args(self):
all_args = dict(
args="args", bufsize="bufsize", executable="executable",
stdin="stdin", stdout="stdout", stderr="stderr",
preexec_fn="preexec_fn", close_fds="close_fds", shell="shell",
cwd="cwd", env="env", universal_newlines="universal_newlines",
- startupinfo="startupinfo", creationflags="creationflags")
+ startupinfo="startupinfo", creationflags="creationflags",
+ restore_signals="restore_signals",
+ start_new_session="start_new_session", pass_fds="pass_fds",
+ encoding="encoding", errors="errors")
+ if sys.version_info >= (3, 7):
+ all_args["text"] = "text"
+ if sys.version_info >= (3, 9):
+ all_args["group"] = "group"
+ all_args["extra_groups"] = "extra_groups"
+ all_args["user"] = "user"
+ all_args["umask"] = "umask"
+ if sys.version_info >= (3, 10):
+ all_args["pipesize"] = "pipesize"
def get_info(proc_args):
self.assertEqual(all_args, proc_args)
return {}
fixture = self.useFixture(FakePopen(get_info))
fixture(**all_args)
+ @testtools.skipUnless(
+ sys.version_info < (3, 7), "only relevant on Python <3.7")
+ def test_rejects_3_7_args_on_older_versions(self):
+ fixture = self.useFixture(FakePopen(lambda proc_args: {}))
+ with testtools.ExpectedException(
+ TypeError, r".* got an unexpected keyword argument 'text'"):
+ fixture(args="args", text=True)
+
+ @testtools.skipUnless(
+ sys.version_info < (3, 9), "only relevant on Python <3.9")
+ def test_rejects_3_9_args_on_older_versions(self):
+ fixture = self.useFixture(FakePopen(lambda proc_args: {}))
+ for arg_name in ("group", "extra_groups", "user", "umask"):
+ kwargs = {arg_name: arg_name}
+ expected_message = (
+ r".* got an unexpected keyword argument '{}'".format(arg_name))
+ with testtools.ExpectedException(TypeError, expected_message):
+ fixture(args="args", **kwargs)
+
+ @testtools.skipUnless(
+ sys.version_info < (3, 10), "only relevant on Python <3.10")
+ def test_rejects_3_10_args_on_older_versions(self):
+ fixture = self.useFixture(FakePopen(lambda proc_args: {}))
+ with testtools.ExpectedException(
+ TypeError,
+ r".* got an unexpected keyword argument 'pipesize'"):
+ fixture(args="args", pipesize=1024)
+
def test_custom_returncode(self):
def get_info(proc_args):
return dict(returncode=1)
|
`FakePopen` is not fully compatible with Python 3.7
Python 3.7 introduced the `text` parameter for `Popen`, but `FakePopen` has not yet been adjusted to accept it:
https://github.com/testing-cabal/fixtures/blob/a01ce5350a106dbce313b1a6370593227574379d/fixtures/_fixtures/popen.py#L125-L129
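For reference, a minimal stdlib-only sketch of what the real `subprocess` API accepts on Python 3.7+, which a faithful test double needs to mirror:
```python
import subprocess

# Python 3.7+: text=True decodes stdout/stderr to str instead of bytes.
result = subprocess.run(["echo", "hi"], capture_output=True, text=True)
print(result.stdout)  # 'hi\n' as str, not b'hi\n'
```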
|
0.0
|
7aa50f2059dd09cc4321462e5e24310d223c3350
|
[
"fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_handles_all_Popen_args"
] |
[
"fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test___call___is_recorded",
"fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_custom_returncode",
"fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_inject_content_stdout",
"fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_installs_restores_global",
"fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_rejects_3_10_args_on_older_versions",
"fixtures/tests/_fixtures/test_popen.py::TestFakePopen::test_with_popen_custom",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_args",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate_with_input",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate_with_input_and_stdin",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate_with_out",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_communicate_with_timeout",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_kill",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_poll",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_poll_with_returncode",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_wait",
"fixtures/tests/_fixtures/test_popen.py::TestFakeProcess::test_wait_with_timeout_and_endtime"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-02-09 10:50:21+00:00
|
apache-2.0
| 5,862 |
|
testing-cabal__systemfixtures-9
|
diff --git a/systemfixtures/filesystem.py b/systemfixtures/filesystem.py
index 40d9da0..f26a3ee 100644
--- a/systemfixtures/filesystem.py
+++ b/systemfixtures/filesystem.py
@@ -12,6 +12,7 @@ if six.PY2:
BUILTIN_OPEN = "__builtin__.open"
if six.PY3:
BUILTIN_OPEN = "builtins.open"
+ from os import DirEntry
GENERIC_APIS = (
@@ -139,6 +140,8 @@ class FakeFilesystem(Fixture):
def _is_fake_path_or_fd(self, path, *args, **kwargs):
if isinstance(path, int):
path = self._path_from_fd(path)
+ elif isinstance(path, DirEntry):
+ path = path.name
return self._is_fake_path(path)
def _is_fake_symlink(self, src, dst, *args, **kwargs):
|
testing-cabal/systemfixtures
|
9c0908083a2f8914621ef5068c024ee41f84981a
|
diff --git a/systemfixtures/tests/test_filesystem.py b/systemfixtures/tests/test_filesystem.py
index 5041bb0..ec3d26a 100644
--- a/systemfixtures/tests/test_filesystem.py
+++ b/systemfixtures/tests/test_filesystem.py
@@ -97,6 +97,12 @@ class FakeFilesystemTest(TestCase):
shutil.rmtree("/foo/bar")
self.assertEqual([], os.listdir("/foo"))
+ def test_copytree(self):
+ self.fs.add("/foo")
+ shutil.copytree("./doc", "/foo")
+ self.assertEqual(
+ sorted(os.listdir("./doc")), sorted(os.listdir("/foo")))
+
if six.PY3:
def test_listdir_with_fd(self):
|
shutil.copytree to an overlayed dir fails under Python 3.8
When copying a tree to an overlayed dir, I get the following exception caused by `_is_fake_path` not handling DirEntry params:
```python
shutil.copytree(CHARM_DIR, self.charm_dir)
File "/usr/lib/python3.8/shutil.py", line 554, in copytree
return _copytree(entries=entries, src=src, dst=dst, symlinks=symlinks,
File "/usr/lib/python3.8/shutil.py", line 496, in _copytree
copy_function(srcobj, dstname)
File "/usr/lib/python3.8/shutil.py", line 432, in copy2
copyfile(src, dst, follow_symlinks=follow_symlinks)
File "/usr/lib/python3.8/shutil.py", line 261, in copyfile
with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/_overlay.py", line 23, in _new_value
if self.condition(*args, **kwargs):
File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/filesystem.py", line 146, in _is_fake_path_or_fd
return self._is_fake_path(path)
File "/home/nessita/canonical/franky/env/lib/python3.8/site-packages/systemfixtures/filesystem.py", line 133, in _is_fake_path
if path.startswith(prefix):
AttributeError: 'posix.DirEntry' object has no attribute 'startswith'
```
A possible fix would be something like this:
```python
if isinstance(path, os.DirEntry):
    path = path.name
```
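For context, a small stdlib-only sketch of the root cause (assuming the working directory has at least one entry): on Python 3.8, `shutil.copytree` hands `os.DirEntry` objects from `os.scandir` straight to the copy function, and a `DirEntry` has no `startswith`:
```python
import os

entry = next(os.scandir("."))      # os.scandir yields os.DirEntry objects
print(entry.name.startswith("x"))  # fine: .name is a plain str
try:
    entry.startswith("x")          # what the overlay's path check effectively does
except AttributeError as e:
    print(e)  # 'posix.DirEntry' object has no attribute 'startswith'
```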
|
0.0
|
9c0908083a2f8914621ef5068c024ee41f84981a
|
[
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_copytree"
] |
[
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add_non_absolute",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_add_sub_paths",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_chmod",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_chown",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_fchown",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_glob",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_listdir_with_fd",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_readlink_to_fake_path",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_readlink_to_real_path",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_rename",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_rmtree",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_sqlite3",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_symlink",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_unlink",
"systemfixtures/tests/test_filesystem.py::FakeFilesystemTest::test_walk"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-10-29 12:39:29+00:00
|
mit
| 5,863 |
|
testingautomated-usi__uncertainty-wizard-152
|
diff --git a/uncertainty_wizard/quantifiers/mean_softmax.py b/uncertainty_wizard/quantifiers/mean_softmax.py
index 6c32df0..d8f01f6 100644
--- a/uncertainty_wizard/quantifiers/mean_softmax.py
+++ b/uncertainty_wizard/quantifiers/mean_softmax.py
@@ -17,7 +17,7 @@ class MeanSoftmax(ConfidenceQuantifier):
# docstr-coverage:inherited
@classmethod
def aliases(cls) -> List[str]:
- return ["mean_softmax", "ensembling", "ms"]
+ return ["mean_softmax", "ensembling", "ms", "MeanSoftmax"]
# docstr-coverage:inherited
@classmethod
diff --git a/uncertainty_wizard/quantifiers/mutual_information.py b/uncertainty_wizard/quantifiers/mutual_information.py
index adfe725..5a0bc11 100644
--- a/uncertainty_wizard/quantifiers/mutual_information.py
+++ b/uncertainty_wizard/quantifiers/mutual_information.py
@@ -30,7 +30,7 @@ class MutualInformation(UncertaintyQuantifier):
# docstr-coverage:inherited
@classmethod
def aliases(cls) -> List[str]:
- return ["mutu_info", "mutual_information", "mi"]
+ return ["mutu_info", "mutual_information", "mi", "MutualInformation"]
# docstr-coverage:inherited
@classmethod
diff --git a/uncertainty_wizard/quantifiers/predictive_entropy.py b/uncertainty_wizard/quantifiers/predictive_entropy.py
index d29c39b..8a26a64 100644
--- a/uncertainty_wizard/quantifiers/predictive_entropy.py
+++ b/uncertainty_wizard/quantifiers/predictive_entropy.py
@@ -44,7 +44,7 @@ class PredictiveEntropy(UncertaintyQuantifier):
# docstr-coverage:inherited
@classmethod
def aliases(cls) -> List[str]:
- return ["predictive_entropy", "pred_entropy", "PE"]
+ return ["predictive_entropy", "pred_entropy", "PE", "PredictiveEntropy"]
# docstr-coverage:inherited
@classmethod
diff --git a/uncertainty_wizard/quantifiers/variation_ratio.py b/uncertainty_wizard/quantifiers/variation_ratio.py
index 6083373..aec4c46 100644
--- a/uncertainty_wizard/quantifiers/variation_ratio.py
+++ b/uncertainty_wizard/quantifiers/variation_ratio.py
@@ -30,7 +30,7 @@ class VariationRatio(UncertaintyQuantifier):
# docstr-coverage:inherited
@classmethod
def aliases(cls) -> List[str]:
- return ["variation_ratio", "vr", "var_ratio"]
+ return ["variation_ratio", "vr", "var_ratio", "VariationRatio"]
# docstr-coverage:inherited
@classmethod
|
testingautomated-usi/uncertainty-wizard
|
04fbec4de6c8f9ab70d7cd38891a225204706c11
|
diff --git a/tests_unit/quantifiers_tests/test_mean_softmax.py b/tests_unit/quantifiers_tests/test_mean_softmax.py
index 02f1cfd..0b03011 100644
--- a/tests_unit/quantifiers_tests/test_mean_softmax.py
+++ b/tests_unit/quantifiers_tests/test_mean_softmax.py
@@ -17,6 +17,8 @@ class TestMeanSoftmax(TestCase):
isinstance(QuantifierRegistry.find("mean_softmax"), MeanSoftmax)
)
self.assertTrue(isinstance(QuantifierRegistry.find("ensembling"), MeanSoftmax))
+ self.assertTrue(isinstance(QuantifierRegistry.find("MS"), MeanSoftmax))
+ self.assertTrue(isinstance(QuantifierRegistry.find("MeanSoftmax"), MeanSoftmax))
def test_is_confidence(self):
self.assertTrue(MeanSoftmax.is_confidence())
diff --git a/tests_unit/quantifiers_tests/test_mutual_information.py b/tests_unit/quantifiers_tests/test_mutual_information.py
index 7eb2c11..fafc4dc 100644
--- a/tests_unit/quantifiers_tests/test_mutual_information.py
+++ b/tests_unit/quantifiers_tests/test_mutual_information.py
@@ -19,6 +19,10 @@ class TestMutualInformation(TestCase):
self.assertTrue(
isinstance(QuantifierRegistry.find("mutu_info"), MutualInformation)
)
+ self.assertTrue(isinstance(QuantifierRegistry.find("MI"), MutualInformation))
+ self.assertTrue(
+ isinstance(QuantifierRegistry.find("MutualInformation"), MutualInformation)
+ )
def test_is_confidence(self):
self.assertFalse(MutualInformation.is_confidence())
diff --git a/tests_unit/quantifiers_tests/test_one_shot_classifiers.py b/tests_unit/quantifiers_tests/test_one_shot_classifiers.py
index 85bf274..1b2b6a2 100644
--- a/tests_unit/quantifiers_tests/test_one_shot_classifiers.py
+++ b/tests_unit/quantifiers_tests/test_one_shot_classifiers.py
@@ -63,6 +63,12 @@ class TestPCS(TestCase):
self.assertTrue(
isinstance(QuantifierRegistry.find("PCS"), PredictionConfidenceScore)
)
+ self.assertTrue(
+ isinstance(
+ QuantifierRegistry.find("PredictionConfidenceScore"),
+ PredictionConfidenceScore,
+ )
+ )
self.assertTrue(
isinstance(
QuantifierRegistry.find("prediction_confidence_score"),
@@ -140,6 +146,7 @@ class TestSoftmax(TestCase):
def test_string_representation(self):
self.assertTrue(isinstance(QuantifierRegistry.find("softmax"), MaxSoftmax))
self.assertTrue(isinstance(QuantifierRegistry.find("max_softmax"), MaxSoftmax))
+ self.assertTrue(isinstance(QuantifierRegistry.find("MaxSoftmax"), MaxSoftmax))
def test_is_confidence(self):
self.assertTrue(MaxSoftmax.is_confidence())
@@ -213,6 +220,7 @@ class TestSoftmaxEntropy(TestCase):
self.assertTrue(
isinstance(QuantifierRegistry.find("SoftmaxEntropy"), SoftmaxEntropy)
)
+ self.assertTrue(isinstance(QuantifierRegistry.find("SE"), SoftmaxEntropy))
def test_is_confidence(self):
self.assertFalse(SoftmaxEntropy.is_confidence())
diff --git a/tests_unit/quantifiers_tests/test_predictive_entropy.py b/tests_unit/quantifiers_tests/test_predictive_entropy.py
index cebcf67..60be40f 100644
--- a/tests_unit/quantifiers_tests/test_predictive_entropy.py
+++ b/tests_unit/quantifiers_tests/test_predictive_entropy.py
@@ -19,6 +19,10 @@ class TestPredictiveEntropy(TestCase):
self.assertTrue(
isinstance(QuantifierRegistry.find("pred_entropy"), PredictiveEntropy)
)
+ self.assertTrue(isinstance(QuantifierRegistry.find("PE"), PredictiveEntropy))
+ self.assertTrue(
+ isinstance(QuantifierRegistry.find("PredictiveEntropy"), PredictiveEntropy)
+ )
def test_is_confidence(self):
self.assertFalse(PredictiveEntropy.is_confidence())
diff --git a/tests_unit/quantifiers_tests/test_stddev.py b/tests_unit/quantifiers_tests/test_stddev.py
index df43465..dbbea1c 100644
--- a/tests_unit/quantifiers_tests/test_stddev.py
+++ b/tests_unit/quantifiers_tests/test_stddev.py
@@ -17,6 +17,9 @@ class TestStandardDeviation(TestCase):
isinstance(QuantifierRegistry.find("standard_deviation"), StandardDeviation)
)
self.assertTrue(isinstance(QuantifierRegistry.find("std"), StandardDeviation))
+ self.assertTrue(
+ isinstance(QuantifierRegistry.find("StandardDeviation"), StandardDeviation)
+ )
self.assertTrue(
isinstance(QuantifierRegistry.find("stddev"), StandardDeviation)
)
diff --git a/tests_unit/quantifiers_tests/test_variation_ratio.py b/tests_unit/quantifiers_tests/test_variation_ratio.py
index 8462857..a814631 100644
--- a/tests_unit/quantifiers_tests/test_variation_ratio.py
+++ b/tests_unit/quantifiers_tests/test_variation_ratio.py
@@ -20,6 +20,9 @@ class TestVariationRatio(TestCase):
isinstance(QuantifierRegistry.find("var_ratio"), VariationRatio)
)
self.assertTrue(isinstance(QuantifierRegistry.find("VR"), VariationRatio))
+ self.assertTrue(
+ isinstance(QuantifierRegistry.find("VariationRatio"), VariationRatio)
+ )
def test_is_confidence(self):
self.assertFalse(VariationRatio.is_confidence())
|
Alias `VariationRatio` is missing
The VariationRatio quantifier is missing its corresponding class-name alias, which should be there according to the docs:
https://uncertainty-wizard.readthedocs.io/en/latest/user_guide_quantifiers.html
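A minimal reproduction sketch; the import path is assumed from the test suite and may differ:
```python
from uncertainty_wizard.quantifiers import QuantifierRegistry, VariationRatio

# Documented but unregistered before the fix: only "variation_ratio", "vr"
# and "var_ratio" resolve; the class-name alias fails to look up.
quantifier = QuantifierRegistry.find("VariationRatio")
assert isinstance(quantifier, VariationRatio)
```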
|
0.0
|
04fbec4de6c8f9ab70d7cd38891a225204706c11
|
[
"tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_string_representation",
"tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_string_representation",
"tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_string_representation",
"tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_string_representation"
] |
[
"tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_as_confidence_flag",
"tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_is_confidence",
"tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_problem_type",
"tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_samples_type_declaration",
"tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_single_input_no_entropy",
"tests_unit/quantifiers_tests/test_mean_softmax.py::TestMeanSoftmax::test_two_inputs_high_pred_entropy",
"tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_as_confidence_flag",
"tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_is_confidence",
"tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_problem_type",
"tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_samples_type_declaration",
"tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_single_input_no_entropy",
"tests_unit/quantifiers_tests/test_mutual_information.py::TestMutualInformation::test_two_inputs_high_pred_entropy",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_is_confidence",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_problem_type",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_quantification",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_samples_type_declaration",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestDeepGini::test_string_representation",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_duplicate_non_winner",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_duplicate_winner",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_happy_path_batch",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_happy_path_single",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_is_confidence",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_problem_type",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_samples_type_declaration",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestPCS::test_string_representation",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_duplicate_non_winner",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_duplicate_winner",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_happy_path_batch",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_happy_path_single",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_is_confidence",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_problem_type",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_samples_type_declaration",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmax::test_string_representation",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_duplicate_non_winner",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_duplicate_winner",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_happy_path_batch",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_happy_path_single",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_is_confidence",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_problem_type",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_samples_type_declaration",
"tests_unit/quantifiers_tests/test_one_shot_classifiers.py::TestSoftmaxEntropy::test_string_representation",
"tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_is_confidence",
"tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_problem_type",
"tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_samples_type_declaration",
"tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_single_input_no_entropy",
"tests_unit/quantifiers_tests/test_predictive_entropy.py::TestPredictiveEntropy::test_two_inputs_high_pred_entropy",
"tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_happy_path_single",
"tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_is_confidence",
"tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_problem_type",
"tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_samples_type_declaration",
"tests_unit/quantifiers_tests/test_stddev.py::TestStandardDeviation::test_string_representation",
"tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_happy_path_batch",
"tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_happy_path_single",
"tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_is_confidence",
"tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_problem_type",
"tests_unit/quantifiers_tests/test_variation_ratio.py::TestVariationRatio::test_samples_type_declaration"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-03 08:42:43+00:00
|
mit
| 5,864 |
|
textile__python-textile-41
|
diff --git a/textile/core.py b/textile/core.py
index 692cca4..9c6623f 100644
--- a/textile/core.py
+++ b/textile/core.py
@@ -230,7 +230,7 @@ class Textile(object):
self.unreferencedNotes = OrderedDict()
self.notelist_cache = OrderedDict()
- if text == '':
+ if text.strip() == '':
return text
if self.restricted:
@@ -811,7 +811,7 @@ class Textile(object):
"""If we find a closing square bracket we are going to see if it is
balanced. If it is balanced with matching opening bracket then it
is part of the URL else we spit it back out of the URL."""
- # If counts['['] is None, count the occurrences of '['
+ # If counts['['] is None, count the occurrences of '['
counts['['] = counts['['] or url.count('[')
if counts['['] == counts[']']:
|
textile/python-textile
|
f2a9408cdeea585861d76f6fa85e1ba37c9d011f
|
diff --git a/tests/test_github_issues.py b/tests/test_github_issues.py
index bf9c339..b8a8330 100644
--- a/tests/test_github_issues.py
+++ b/tests/test_github_issues.py
@@ -91,3 +91,9 @@ def test_github_issue_36():
result = textile.textile(text)
expect = '\t<p><a href="https://www.google.com/search?q=Chögyam+Trungpa">Chögyam Trungpa</a></p>'
assert result == expect
+
+def test_github_issue_40():
+ text = '\r\n'
+ result = textile.textile(text)
+ expect = '\r\n'
+ assert result == expect
|
IndexError string index out of range on whitespace only string
When I try to process a string that is whitespace only, I get an `IndexError`:
```
In [1]: from textile import textile
In [2]: textile(' ')
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
```
Ref: https://github.com/textile/python-textile/issues/26
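With the guard in place, whitespace-only input round-trips unchanged, matching the added regression test:
```python
>>> from textile import textile
>>> textile('\r\n')
'\r\n'
```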
|
0.0
|
f2a9408cdeea585861d76f6fa85e1ba37c9d011f
|
[
"tests/test_github_issues.py::test_github_issue_40"
] |
[
"tests/test_github_issues.py::test_github_issue_16",
"tests/test_github_issues.py::test_github_issue_17",
"tests/test_github_issues.py::test_github_issue_20",
"tests/test_github_issues.py::test_github_issue_21",
"tests/test_github_issues.py::test_github_issue_22",
"tests/test_github_issues.py::test_github_issue_26",
"tests/test_github_issues.py::test_github_issue_27",
"tests/test_github_issues.py::test_github_issue_28",
"tests/test_github_issues.py::test_github_issue_36"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2017-03-30 21:26:10+00:00
|
mit
| 5,865 |
|
theavey__ParaTemp-31
|
diff --git a/paratemp/coordinate_analysis.py b/paratemp/coordinate_analysis.py
index a204645..924d998 100644
--- a/paratemp/coordinate_analysis.py
+++ b/paratemp/coordinate_analysis.py
@@ -112,7 +112,7 @@ class Universe(MDa.Universe):
if overwrite or ('/'+time not in store.keys()):
store[time] = self._data
else:
- store_cols = store.get_node(time).axis0.read()
+ store_cols = store.get_node(time).axis0.read().astype(str)
set_diff_cols = set(self._data.columns).difference(store_cols)
if not set_diff_cols:
if self._verbosity:
@@ -160,10 +160,12 @@ class Universe(MDa.Universe):
if self._verbosity:
print('No data to read in '
'{}[{}]'.format(filename, time))
+ return
for key in keys_to_read:
self._data[key] = read_df[key]
def calculate_distances(self, recalculate=False, ignore_file_change=False,
+ read_data=True, save_data=True,
*args, **kwargs):
"""
Calculate distances by iterating through the trajectory
@@ -180,6 +182,13 @@ class Universe(MDa.Universe):
the file has changed will be printed.
If False, if the length of the trajectory has changed,
FileChangedError will be raised.
+ :param bool read_data: Default: True.
+ If True, :func:`read_data` will be used to read any data in the
+ default file with `ignore_no_data=True`.
+ :param bool save_data: Default: True.
+ If True, :func:`save_data` will be used to save the calculated
+ distances to the default file.
+ Nothing will be saved if there is nothing new to calculate.
:param args:
:param kwargs:
:return: None
@@ -190,6 +199,11 @@ class Universe(MDa.Universe):
# TODO document this function
# TODO find a way to take keyword type args with non-valid python
# identifiers (e.g., "O-O").
+ if read_data:
+ v = self._verbosity
+ self._verbosity = False
+ self.read_data(ignore_no_data=True)
+ self._verbosity = v
# Make empty atom selections to be appended to:
first_group = self.select_atoms('protein and not protein')
second_group = self.select_atoms('protein and not protein')
@@ -270,6 +284,8 @@ class Universe(MDa.Universe):
result=dists[i])
for i, column in enumerate(column_names):
self._data[column] = dists[:, i]
+ if save_data:
+ self.save_data()
def calculate_dihedrals(self, *args, **kwargs):
""""""
diff --git a/requirements.txt b/requirements.txt
index 4783d85..6719eeb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,7 @@ matplotlib
panedr
py
gromacswrapper
+tables
typing
scipy
six
diff --git a/setup.py b/setup.py
index 141b571..fc3bb71 100644
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,7 @@ setup(
'matplotlib',
'panedr',
'gromacswrapper',
+ 'tables',
'typing',
'scipy',
'six',
|
theavey/ParaTemp
|
a8c11fea5fd99af1e66418aab6ac1743ea527cce
|
diff --git a/tests/test_coordinate_analysis.py b/tests/test_coordinate_analysis.py
index fc4f174..2ea9283 100644
--- a/tests/test_coordinate_analysis.py
+++ b/tests/test_coordinate_analysis.py
@@ -24,9 +24,11 @@
from __future__ import absolute_import
-import pytest
-import numpy as np
import matplotlib
+import numpy as np
+import pandas as pd
+import py
+import pytest
matplotlib.use('agg')
@@ -41,24 +43,35 @@ def test_matplotlib_testing_backend():
class TestXTCUniverse(object):
@pytest.fixture
- def univ(self):
+ def univ(self, tmpdir):
from paratemp import coordinate_analysis as ca
- _univ = ca.Universe('tests/test-data/spc2.gro',
- 'tests/test-data/t-spc2-traj.xtc',
- temp=205.)
+ gro = py.path.local('tests/test-data/spc2.gro')
+ traj = py.path.local('tests/test-data/t-spc2-traj.xtc')
+ gro.copy(tmpdir)
+ traj.copy(tmpdir)
+ with tmpdir.as_cwd():
+ _univ = ca.Universe(gro.basename,
+ traj.basename,
+ temp=205.)
return _univ
@pytest.fixture
def univ_w_a(self, univ):
- univ.calculate_distances(a='4 5')
+ univ.calculate_distances(a='4 5',
+ read_data=False, save_data=False)
return univ
@pytest.fixture
- def univ_pbc(self):
+ def univ_pbc(self, tmpdir):
from paratemp import coordinate_analysis as ca
- _univ = ca.Universe('tests/test-data/spc2.gro',
- 'tests/test-data/spc2-traj-pbc.xtc',
- temp=205.)
+ gro = py.path.local('tests/test-data/spc2.gro')
+ traj = py.path.local('tests/test-data/spc2-traj-pbc.xtc')
+ gro.copy(tmpdir)
+ traj.copy(tmpdir)
+ with tmpdir.as_cwd():
+ _univ = ca.Universe(gro.basename,
+ traj.basename,
+ temp=205.)
return _univ
@pytest.fixture
@@ -93,19 +106,23 @@ class TestXTCUniverse(object):
return np.load('tests/ref-data/spc2-fes1d-bins-20.npy')
def test_distance_str(self, univ, ref_a_dists):
- univ.calculate_distances(a='4 5')
+ univ.calculate_distances(a='4 5',
+ read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_distance_list_int(self, univ, ref_a_dists):
- univ.calculate_distances(a=[4, 5])
+ univ.calculate_distances(a=[4, 5],
+ read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_distance_list_str(self, univ, ref_a_dists):
- univ.calculate_distances(a=['4', '5'])
+ univ.calculate_distances(a=['4', '5'],
+ read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_calculate_distances_no_recalc(self, univ_w_a, capsys):
- univ_w_a.calculate_distances(a=[4, 5])
+ univ_w_a.calculate_distances(a=[4, 5],
+ read_data=False, save_data=False)
out, err = capsys.readouterr()
assert out == 'Nothing (new) to calculate here.\n'
@@ -113,11 +130,13 @@ class TestXTCUniverse(object):
"""
:type univ_w_a: paratemp.coordinate_analysis.Universe
"""
- univ_w_a.calculate_distances(a='5 5', recalculate=True)
+ univ_w_a.calculate_distances(a='5 5', recalculate=True,
+ read_data=False, save_data=False)
assert (np.array([0., 0.]) == univ_w_a.data['a']).all()
def test_distance_pbc(self, univ_pbc, ref_a_pbc_dists):
- univ_pbc.calculate_distances(a='4 5')
+ univ_pbc.calculate_distances(a='4 5',
+ read_data=False, save_data=False)
assert np.isclose(ref_a_pbc_dists['a'], univ_pbc.data['a']).all()
def test_calc_fes_1d(self, univ_w_a, ref_delta_g, ref_bins, ref_delta_g_20,
@@ -145,7 +164,7 @@ class TestXTCUniverse(object):
def test_fes_1d_data_str(self, univ_w_a, ref_delta_g, ref_bins):
"""
- :type univ_w_a: paratemp.coordinate_analysiss.Universe
+ :type univ_w_a: paratemp.coordinate_analysis.Universe
:type ref_delta_g: np.ndarray
:type ref_bins: np.ndarray
"""
@@ -174,6 +193,102 @@ class TestXTCUniverse(object):
univ._last_time = 5.1e12
assert univ.final_time_str == '5100ms'
+ def test_save_data(self, univ_w_a, tmpdir, capsys):
+ time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
+ f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
+ with tmpdir.as_cwd():
+ univ_w_a.save_data()
+ out, err = capsys.readouterr()
+ assert tmpdir.join(f_name).exists()
+ with pd.HDFStore(f_name) as store:
+ df = store[time]
+ assert out == 'Saved data to {f_name}[{time}]\n'.format(
+ f_name=f_name, time=time)
+ assert np.allclose(df, univ_w_a.data)
+
+ def test_save_data_no_new(self, univ_w_a, tmpdir, capsys):
+ time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
+ f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
+ with tmpdir.as_cwd():
+ univ_w_a.save_data()
+ capsys.readouterr()
+ univ_w_a.save_data()
+ out, err = capsys.readouterr()
+ assert tmpdir.join(f_name).exists()
+ with pd.HDFStore(f_name) as store:
+ df = store[time]
+ assert out == 'No data added to {f_name}[{time}]\n'.format(
+ f_name=f_name, time=time)
+ assert np.allclose(df, univ_w_a.data)
+
+ def test_save_data_add_new(self, univ, univ_w_a, tmpdir, capsys):
+ time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
+ f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
+ with tmpdir.as_cwd():
+ univ_w_a.save_data()
+ capsys.readouterr()
+ univ.calculate_distances(b='4 5', save_data=False)
+ univ.save_data()
+ out, err = capsys.readouterr()
+ assert out == 'Saved data to {f_name}[{time}]\n'.format(
+ f_name=f_name, time=time)
+
+ def test_read_data(self, univ, univ_w_a, tmpdir, capsys):
+ """
+ :type univ_w_a: paratemp.Universe
+ :type univ: paratemp.Universe
+ """
+ with tmpdir.as_cwd():
+ univ_w_a.save_data()
+ capsys.readouterr() # just so it doesn't print
+ univ.read_data()
+ assert (univ_w_a.data == univ.data).all().all()
+
+ def test_read_data_no_data(self, univ, tmpdir, capsys):
+ """
+ :type univ: paratemp.Universe
+ """
+ time = 'time_' + str(int(univ._last_time / 1000)) + 'ns'
+ f_name = univ.trajectory.filename.replace('xtc', 'h5')
+ with tmpdir.as_cwd():
+ with pytest.raises(IOError, message='This data does not exist!\n'
+ '{}[{}]\n'.format(f_name,
+ time)):
+ univ.read_data()
+ univ.read_data(ignore_no_data=True)
+ out, err = capsys.readouterr()
+ assert out == 'No data to read in {}[{}]\n'.format(f_name, time)
+
+ def test_calculate_distances_save(self, univ, tmpdir, capsys):
+ """
+ :type univ: paratemp.Universe
+ """
+ time = 'time_' + str(int(univ._last_time / 1000)) + 'ns'
+ f_name = univ.trajectory.filename.replace('xtc', 'h5')
+ with tmpdir.as_cwd():
+ univ.calculate_distances(a='4 5')
+ out, err = capsys.readouterr()
+ assert tmpdir.join(f_name).exists()
+ with pd.HDFStore(f_name) as store:
+ df = store[time]
+ assert out == 'Saved data to {f_name}[{time}]\n'.format(
+ f_name=f_name, time=time)
+ assert np.allclose(df, univ.data)
+
+ def test_calculate_distances_read(self, univ_w_a, tmpdir, capsys):
+ """
+ :type univ_w_a: paratemp.Universe
+ """
+ with tmpdir.as_cwd():
+ univ_w_a.save_data()
+ capsys.readouterr()
+ univ_w_a._data = univ_w_a._init_dataframe()
+ univ_w_a.calculate_distances(a='4 5')
+ out, err = capsys.readouterr()
+ assert out == 'Nothing (new) to calculate here.\n'
+
+
+
# TODO add further Universe tests
# ignore_file_change=True
# fes_2d
|
make read_data and save_data defaults for Universe.calculate...
At least for me, I generally want to run these anyway, so it would make sense to run them by default and save as much time as possible. I don't imagine it would change the memory usage much.
Plus, if it's saved, it's much easier to read it back in later if the RAM needs to be freed.
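A sketch of the intended usage once these become defaults (file names and atom selection taken from the test suite):
```python
from paratemp import coordinate_analysis as ca

u = ca.Universe('tests/test-data/spc2.gro',
                'tests/test-data/t-spc2-traj.xtc', temp=205.)

# Proposed defaults: read any cached results first, save new ones to HDF5 after.
u.calculate_distances(a='4 5')

# Opt out to get the old in-memory-only behavior.
u.calculate_distances(a='4 5', read_data=False, save_data=False)
```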
|
0.0
|
a8c11fea5fd99af1e66418aab6ac1743ea527cce
|
[
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_distance_str",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_distance_list_int",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_distance_list_str",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_calculate_distances_no_recalc",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_calculate_distances_yes_recalc",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_distance_pbc",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_calc_fes_1d",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_fes_1d_data_str",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_fes_1d_data_data"
] |
[
"tests/test_coordinate_analysis.py::test_matplotlib_testing_backend",
"tests/test_coordinate_analysis.py::TestXTCUniverse::test_final_time_str"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-06-12 19:42:20+00:00
|
apache-2.0
| 5,866 |
|
theelous3__asks-100
|
diff --git a/asks/request_object.py b/asks/request_object.py
index b9bfd32..6c05b00 100644
--- a/asks/request_object.py
+++ b/asks/request_object.py
@@ -424,7 +424,8 @@ class RequestProcessor:
return c_type, str(len(body)), body
- def _dict_to_query(self, data, params=True, base_query=False):
+ @staticmethod
+ def _dict_to_query(data, params=True, base_query=False):
'''
Turns python dicts in to valid body-queries or queries for use directly
in the request url. Unlike the stdlib quote() and it's variations,
@@ -439,7 +440,7 @@ class RequestProcessor:
query = []
for k, v in data.items():
- if not v:
+ if v is None:
continue
if isinstance(v, (str, Number)):
query.append('='.join(quote_plus(x) for x in (k, str(v))))
|
theelous3/asks
|
733f277b7fa84e17afb4d8c3b0895bec7efb8ab4
|
diff --git a/tests/test_request_object.py b/tests/test_request_object.py
index 0f1c989..9627a99 100644
--- a/tests/test_request_object.py
+++ b/tests/test_request_object.py
@@ -1,6 +1,7 @@
# pylint: disable=no-member
import h11
+import pytest
from asks.request_object import RequestProcessor
@@ -32,3 +33,13 @@ def test_http1_1(monkeypatch):
def test_http1_0(monkeypatch):
response = _catch_response(monkeypatch, [('Connection', 'close')], b'hello')
assert response.body == b'hello'
+
+
[email protected](['data', 'query_str'], [
+ [{'foo': 'bar', 'spam': None}, '?foo=bar'],
+ [{'zero': 0}, '?zero=0'],
+ [{'empty': ''}, '?empty='],
+ [{'false': False}, '?false=False'],
+])
+def test_dict_to_query(data, query_str):
+ assert RequestProcessor._dict_to_query(data) == query_str
|
`request_object.RequestProcessor._dict_to_query` skips dict values evaluating to `False`
Test case:
```python
response = await asks.get('https://httpbin.org/get', params={
'foo': 'bar',
'zero': 0,
'empty': '',
'false': False
})
payload = response.json()
assert 'foo' in payload['args'] # OK
assert 'zero' in payload['args'] # Fail
assert 'empty' in payload['args'] # Fail
assert 'false' in payload['args'] # Fail
```
Erroneous code:
https://github.com/theelous3/asks/blob/733f277b7fa84e17afb4d8c3b0895bec7efb8ab4/asks/request_object.py#L442-L443
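The root cause in miniature: `if not v` treats every falsy value as absent, when only `None` should be skipped. A minimal sketch:
```python
data = {'foo': 'bar', 'zero': 0, 'empty': '', 'false': False, 'spam': None}

buggy = {k: v for k, v in data.items() if v}              # also drops 0, '', False
fixed = {k: v for k, v in data.items() if v is not None}  # drops only None

print(buggy)  # {'foo': 'bar'}
print(fixed)  # {'foo': 'bar', 'zero': 0, 'empty': '', 'false': False}
```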
|
0.0
|
733f277b7fa84e17afb4d8c3b0895bec7efb8ab4
|
[
"tests/test_request_object.py::test_dict_to_query[data0-?foo=bar]",
"tests/test_request_object.py::test_dict_to_query[data1-?zero=0]",
"tests/test_request_object.py::test_dict_to_query[data2-?empty=]",
"tests/test_request_object.py::test_dict_to_query[data3-?false=False]"
] |
[
"tests/test_request_object.py::test_http1_1",
"tests/test_request_object.py::test_http1_0"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-01-15 08:14:55+00:00
|
mit
| 5,867 |
|
theelous3__asks-173
|
diff --git a/asks/http_utils.py b/asks/http_utils.py
index 45ae42d..9d8d56b 100644
--- a/asks/http_utils.py
+++ b/asks/http_utils.py
@@ -3,18 +3,15 @@ Utilities for handling some aspects of http
"""
-__all__ = ["decompress", "parse_content_encoding"]
+__all__ = ["decompress", "decompress_one", "parse_content_encoding"]
-from gzip import decompress as gdecompress
-from zlib import decompress as zdecompress
+import codecs
+from zlib import decompressobj, MAX_WBITS
from .utils import processor
-_compression_mapping = {"gzip": gdecompress, "deflate": zdecompress}
-
-
def parse_content_encoding(content_encoding: str) -> [str]:
compressions = [x.strip() for x in content_encoding.split(",")]
return compressions
@@ -23,11 +20,47 @@ def parse_content_encoding(content_encoding: str) -> [str]:
@processor
def decompress(compressions, encoding=None):
data = b""
+ # https://tools.ietf.org/html/rfc7231
+ # "If one or more encodings have been applied to a representation, the
+ # sender that applied the encodings MUST generate a Content-Encoding
+ # header field that lists the content codings in the order in which
+ # they were applied."
+ # Thus, reversed(compressions).
+ decompressors = [
+ decompress_one(compression) for compression in reversed(compressions)
+ ]
+ if encoding:
+ decompressors.append(make_decoder_shim(encoding))
+ while True:
+ data = yield data
+ for decompressor in decompressors:
+ data = decompressor.send(data)
+
+
+# https://tools.ietf.org/html/rfc7230#section-4.2.1 - #section-4.2.3
+
+DECOMPRESS_WBITS = {
+ "deflate": MAX_WBITS,
+ "gzip": MAX_WBITS + 16,
+ "x-gzip": MAX_WBITS + 16,
+}
+
+
+@processor
+def decompress_one(compression):
+ data = b""
+ decompressor = decompressobj(wbits=DECOMPRESS_WBITS[compression])
+ while True:
+ data = yield data
+ data = decompressor.decompress(data)
+ yield decompressor.flush()
+
+
+@processor
+def make_decoder_shim(encoding):
+ data = b""
+ decoder = codecs.getincrementaldecoder(encoding)(errors="replace")
while True:
- if encoding:
- data = yield data.decode(encoding, errors="replace")
- else:
- data = yield data
- for compression in compressions:
- if compression in _compression_mapping:
- data = _compression_mapping[compression](data)
+ data = yield data
+ data = decoder.decode(data)
+ yield decoder.decode(b"", final=True)
|
theelous3/asks
|
774af51d69ffff0245801d7b4b79a97e6318f5f9
|
diff --git a/tests/test_http_utils.py b/tests/test_http_utils.py
new file mode 100644
index 0000000..026407b
--- /dev/null
+++ b/tests/test_http_utils.py
@@ -0,0 +1,45 @@
+import zlib
+import gzip
+
+import pytest
+
+from asks import http_utils
+
+INPUT_DATA = b"abcdefghijklmnopqrstuvwxyz"
+UNICODE_INPUT_DATA = "\U0001f408\U0001F431" * 5
+
+
[email protected](
+ "compressor,name", [(zlib.compress, "deflate"), (gzip.compress, "gzip")]
+)
+def test_decompress_one_zlib(compressor, name):
+ data = zlib.compress(INPUT_DATA)
+ decompressor = http_utils.decompress_one("deflate")
+ result = b""
+ for i in range(len(data)):
+ b = data[i : i + 1]
+ result += decompressor.send(b)
+ assert result == INPUT_DATA
+
+
+def test_decompress():
+ # we don't expect to see multiple compression types in the wild
+ # but test anyway
+ data = zlib.compress(gzip.compress(INPUT_DATA))
+ decompressor = http_utils.decompress(["gzip", "deflate"])
+ result = b""
+ for i in range(len(data)):
+ b = data[i : i + 1]
+ result += decompressor.send(b)
+ assert result == INPUT_DATA
+
+
+def test_decompress_decoding():
+ data = zlib.compress(UNICODE_INPUT_DATA.encode("utf-8"))
+ decompressor = http_utils.decompress(["deflate"], encoding="utf-8")
+ result = ""
+ for i in range(len(data)):
+ b = data[i : i + 1]
+ res = decompressor.send(b)
+ result += res
+ assert result == UNICODE_INPUT_DATA
|
Chunked and encoded gzip not decompressing correctly in streams
From https://github.com/theelous3/asks/issues/95 specifically https://github.com/theelous3/asks/issues/95#issuecomment-435187332
The gzip module blows and isn't sophisticated enough to decompress streams. Drop it in favour of full zlib.
https://docs.python.org/3/library/zlib.html#zlib.decompress
https://stackoverflow.com/a/22311297
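A stdlib-only sketch of the streaming approach the fix builds on: `zlib.decompressobj` with `wbits=MAX_WBITS + 16` consumes a gzip stream incrementally, which the one-shot `gzip.decompress` cannot:
```python
import gzip
import zlib

payload = gzip.compress(b"hello world")
d = zlib.decompressobj(wbits=zlib.MAX_WBITS + 16)  # +16 selects gzip framing

out = b""
for i in range(len(payload)):             # feed one byte at a time, like a socket
    out += d.decompress(payload[i:i + 1])
out += d.flush()
assert out == b"hello world"
```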
|
0.0
|
774af51d69ffff0245801d7b4b79a97e6318f5f9
|
[
"tests/test_http_utils.py::test_decompress_one_zlib[compress-deflate]",
"tests/test_http_utils.py::test_decompress_one_zlib[compress-gzip]",
"tests/test_http_utils.py::test_decompress",
"tests/test_http_utils.py::test_decompress_decoding"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-21 13:37:59+00:00
|
mit
| 5,868 |
|
thegetty__crom-50
|
diff --git a/README.md b/README.md
index 73b4a2a..bab3576 100644
--- a/README.md
+++ b/README.md
@@ -18,12 +18,20 @@ Import the classes from the model module. As the classes are dynamically generat
```python
from cromulent.model import factory, Group
-g1 = Group("Organization")
-g2 = Group("Department")
+g1 = Group(ident="Organization")
+g2 = Group(ident="Department")
g1.member = g2
print factory.toString(g1, compact=False)
```
+The constructor for the classes takes the following parameters:
+
+* `ident` - an identifier to use for this instance. If specified, it should be a URI represented as a string. If it is the empty string, it will result in no identifier. If not specified, or specified as `None`, then it will be auto-generated by the factory if `auto_assign_id` is true, or if `auto_assign_id` is false, then it will result in no identifier.
+* `label` - a human readable label for the resource, to act as internal documentation for the data
+* `value` or `content` - a data value for the class. Dimensions and MonetaryAmounts use `value` which must be a number, and Name, Identifier, LinguisticObject and similar use `content` which must be a string.
+* Additional keywords may be passed in, and will be sent to class-specific initialization code.
+
+
### Vocabulary
```python
@@ -38,6 +46,7 @@ print factory.toString(h, compact=False)
* Assigning to the same property repeatedly does NOT overwrite the value, instead it appends. To overwrite a value, instead set it to a false value first.
+
### Factory settings
There are quite a few settings for how the module works, which are managed by a `factory` object.
diff --git a/cromulent/model.py b/cromulent/model.py
index f5ab3ae..05e9034 100644
--- a/cromulent/model.py
+++ b/cromulent/model.py
@@ -358,9 +358,9 @@ class ExternalResource(object):
_type = ""
_embed = True
- def __init__(self, ident=""):
+ def __init__(self, ident=None):
self._factory = factory
- if ident:
+ if ident is not None:
if ident.startswith('urn:uuid'):
self.id = ident
elif ident.startswith('http'):
@@ -378,6 +378,9 @@ class ExternalResource(object):
ident = "%s:%s" % (self._factory.prefixes_rev[pref], rest)
self.id = ident
+ elif ident == "":
+ # Allow explicit setting of empty string
+ self.id = ""
else:
# Allow for prefixed term
curied = ident.split(':', 1)
@@ -386,10 +389,10 @@ class ExternalResource(object):
self._full_id = self._factory.prefixes[curied[0]] + curied[1]
else:
self.id = factory.base_url + self.__class__._uri_segment + "/" + ident
-
elif factory.auto_assign_id:
self.id = factory.generate_id(self)
else:
+ # Not auto assigning, and not submitted = blank node
self.id = ""
def _toJSON(self, done, top=None):
@@ -408,7 +411,7 @@ class BaseResource(ExternalResource):
_classification = ""
_classhier = []
- def __init__(self, ident="", label="", value="", content="", **kw):
+ def __init__(self, ident=None, label="", value="", content="", **kw):
"""Initialize BaseObject."""
super(BaseResource, self).__init__(ident)
|
thegetty/crom
|
5c812f1a0acd98311143b6f63185d2c3f2cc23b7
|
diff --git a/tests/test_model.py b/tests/test_model.py
index 5500935..657899b 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -210,17 +210,20 @@ class TestBuildClass(unittest.TestCase):
class TestAutoIdentifiers(unittest.TestCase):
def test_bad_autoid(self):
+ model.factory.auto_assign_id = True
model.factory.auto_id_type = "broken"
self.assertRaises(model.ConfigurationError, model.factory.generate_id,
"irrelevant")
def test_int(self):
+ model.factory.auto_assign_id = True
model.factory.auto_id_type = "int"
p = model.Person()
p2 = model.Activity()
self.assertEqual(int(p.id[-1]), int(p2.id[-1])-1)
def test_int_per_type(self):
+ model.factory.auto_assign_id = True
model.factory.auto_id_type = "int-per-type"
p = model.Person()
p2 = model.Person()
@@ -229,6 +232,7 @@ class TestAutoIdentifiers(unittest.TestCase):
self.assertEqual(int(p.id[-1]), int(p3.id[-1]))
def test_int_per_segment(self):
+ model.factory.auto_assign_id = True
model.factory._auto_id_segments = {}
model.factory.auto_id_type = "int-per-segment"
model.Activity._uri_segment = model.Person._uri_segment
@@ -239,6 +243,7 @@ class TestAutoIdentifiers(unittest.TestCase):
self.assertEqual(int(p.id[-1]), int(p3.id[-1]))
def test_uuid(self):
+ model.factory.auto_assign_id = True
model.factory.auto_id_type = "uuid"
p = model.Person()
self.assertTrue(p.id.startswith('urn:uuid:'))
@@ -254,6 +259,31 @@ class TestAutoIdentifiers(unittest.TestCase):
p4 = model.Person('fish:4')
self.assertTrue(p4.id.startswith(model.factory.base_url))
+ def test_no_ident(self):
+
+ model.factory.auto_assign_id = True
+ p1 = model.Person() # auto assigned
+ p2 = model.Person(ident=None) # auto assigned
+ p3 = model.Person(ident="") # bnode explicitly
+
+ self.assertTrue(p1.id.startswith('http'))
+ self.assertTrue(p2.id.startswith('http'))
+ self.assertEqual(p3.id, '')
+
+ model.factory.auto_assign_id = False
+ p4 = model.Person() # bnode is default
+ p5 = model.Person(ident=None) # bnode is default
+ p6 = model.Person(ident="") # bnode explicitly
+
+ self.assertEqual(p4.id, '')
+ self.assertEqual(p5.id, '')
+ self.assertEqual(p6.id, '')
+
+
+
+
+
+
class TestBaseResource(unittest.TestCase):
|
Allow ident="" to create blank nodes
WISOTT
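A sketch of the requested behavior, mirroring the added `test_no_ident` test:
```python
from cromulent.model import factory, Person

factory.auto_assign_id = True
p = Person(ident="")  # explicit empty string => blank node, even with auto ids on
assert p.id == ""
```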
|
0.0
|
5c812f1a0acd98311143b6f63185d2c3f2cc23b7
|
[
"tests/test_model.py::TestAutoIdentifiers::test_no_ident"
] |
[
"tests/test_model.py::TestFactorySetup::test_base_dir",
"tests/test_model.py::TestFactorySetup::test_base_url",
"tests/test_model.py::TestFactorySetup::test_default_lang",
"tests/test_model.py::TestFactorySetup::test_load_context",
"tests/test_model.py::TestFactorySetup::test_pickle",
"tests/test_model.py::TestFactorySetup::test_set_debug",
"tests/test_model.py::TestFactorySetup::test_set_debug_stream",
"tests/test_model.py::TestFactorySerialization::test_breadth",
"tests/test_model.py::TestFactorySerialization::test_broken_unicode",
"tests/test_model.py::TestFactorySerialization::test_external",
"tests/test_model.py::TestFactorySerialization::test_pipe_scoped",
"tests/test_model.py::TestFactorySerialization::test_recursion",
"tests/test_model.py::TestFactorySerialization::test_string_list",
"tests/test_model.py::TestFactorySerialization::test_toJSON",
"tests/test_model.py::TestFactorySerialization::test_toJSON_full",
"tests/test_model.py::TestFactorySerialization::test_toString",
"tests/test_model.py::TestProcessTSV::test_process_tsv",
"tests/test_model.py::TestBuildClasses::test_build_classes",
"tests/test_model.py::TestBuildClass::test_build_class",
"tests/test_model.py::TestAutoIdentifiers::test_bad_autoid",
"tests/test_model.py::TestAutoIdentifiers::test_int",
"tests/test_model.py::TestAutoIdentifiers::test_int_per_segment",
"tests/test_model.py::TestAutoIdentifiers::test_int_per_type",
"tests/test_model.py::TestAutoIdentifiers::test_prefixes",
"tests/test_model.py::TestAutoIdentifiers::test_uuid",
"tests/test_model.py::TestBaseResource::test_check_prop",
"tests/test_model.py::TestBaseResource::test_init",
"tests/test_model.py::TestBaseResource::test_list_all_props",
"tests/test_model.py::TestBaseResource::test_multiplicity",
"tests/test_model.py::TestMagicMethods::test_set_magic_resource",
"tests/test_model.py::TestMagicMethods::test_set_magic_resource_inverse",
"tests/test_model.py::TestMagicMethods::test_validate_multiplicity",
"tests/test_model.py::TestMagicMethods::test_validate_profile_off",
"tests/test_model.py::TestMagicMethods::test_validation_off",
"tests/test_model.py::TestMagicMethods::test_validation_unknown",
"tests/test_model.py::TestMagicMethods::test_validation_wrong_type"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-16 20:53:34+00:00
|
apache-2.0
| 5,869 |
|
thegetty__crom-56
|
diff --git a/cromulent/model.py b/cromulent/model.py
index 12997d8..e2363c1 100644
--- a/cromulent/model.py
+++ b/cromulent/model.py
@@ -371,10 +371,19 @@ class ExternalResource(object):
_type = ""
_embed = True
+
+ def _is_uri(self, what):
+ uri_schemes = ['urn:uuid:', 'tag:', 'data:', 'mailto:', 'info:', 'ftp:/', 'sftp:/']
+ for u in uri_schemes:
+ if what.startswith(u):
+ return True
+ return False
+
+
def __init__(self, ident=None):
self._factory = factory
if ident is not None:
- if ident.startswith('urn:uuid'):
+ if self._is_uri(ident):
self.id = ident
elif ident.startswith('http'):
# Try to find prefixable term
@@ -395,7 +404,7 @@ class ExternalResource(object):
# Allow explicit setting of empty string
self.id = ""
else:
- # Allow for prefixed term
+ # Allow for prefixed term that isn't ambiguously a URI
curied = ident.split(':', 1)
if len(curied) == 2 and curied[0] in self._factory.prefixes:
self.id = ident
|
thegetty/crom
|
44cbb8103b7c6372c111dd7969e6049ffaa05ad0
|
diff --git a/tests/test_model.py b/tests/test_model.py
index 281d0f8..e85827c 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -269,6 +269,14 @@ class TestAutoIdentifiers(unittest.TestCase):
p4 = model.Person('fish:4')
self.assertTrue(p4.id.startswith(model.factory.base_url))
+ def test_other_uris(self):
+ p1 = model.Person(ident="tag:some-info-about-person")
+ self.assertEqual(p1.id, "tag:some-info-about-person")
+ p2 = model.Person(ident="info:ulan/500012345")
+ self.assertEqual(p2.id, "info:ulan/500012345")
+ p3 = model.Person(ident="some:random:thing:with:colons")
+ self.assertFalse(p3.id == "some:random:thing:with:colons")
+
def test_no_ident(self):
model.factory.auto_assign_id = True
|
Support tag: URIs for internal identity management
Rather than assigning UUIDs or other final URIs to instances, crom should support tag: URIs that act as temporary carriers of the identity conditions, to be substituted later on the way into a more permanent infrastructure. These would need to be assigned explicitly (they can't be auto-generated) and would otherwise behave like the urn:uuid: pattern.
(/ht @kasei)
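As a sketch of the intended check (mirroring the `_is_uri` helper in the patch above), identifiers starting with a known URI scheme are kept verbatim, while other colon-separated strings still go through prefixed-term resolution:

```
URI_SCHEMES = ['urn:uuid:', 'tag:', 'data:', 'mailto:', 'info:', 'ftp:/', 'sftp:/']

def is_uri(what):
    # True only for strings starting with one of the recognised schemes
    return any(what.startswith(u) for u in URI_SCHEMES)

is_uri('tag:some-info-about-person')     # True  -> used as the id directly
is_uri('some:random:thing:with:colons')  # False -> treated as a possible prefixed term
```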
|
0.0
|
44cbb8103b7c6372c111dd7969e6049ffaa05ad0
|
[
"tests/test_model.py::TestAutoIdentifiers::test_other_uris"
] |
[
"tests/test_model.py::TestFactorySetup::test_base_dir",
"tests/test_model.py::TestFactorySetup::test_base_url",
"tests/test_model.py::TestFactorySetup::test_default_lang",
"tests/test_model.py::TestFactorySetup::test_load_context",
"tests/test_model.py::TestFactorySetup::test_pickle",
"tests/test_model.py::TestFactorySetup::test_set_debug",
"tests/test_model.py::TestFactorySetup::test_set_debug_stream",
"tests/test_model.py::TestFactorySerialization::test_breadth",
"tests/test_model.py::TestFactorySerialization::test_broken_unicode",
"tests/test_model.py::TestFactorySerialization::test_collapse_json",
"tests/test_model.py::TestFactorySerialization::test_external",
"tests/test_model.py::TestFactorySerialization::test_pipe_scoped",
"tests/test_model.py::TestFactorySerialization::test_recursion",
"tests/test_model.py::TestFactorySerialization::test_string_list",
"tests/test_model.py::TestFactorySerialization::test_toJSON",
"tests/test_model.py::TestFactorySerialization::test_toJSON_full",
"tests/test_model.py::TestFactorySerialization::test_toString",
"tests/test_model.py::TestProcessTSV::test_process_tsv",
"tests/test_model.py::TestBuildClasses::test_build_classes",
"tests/test_model.py::TestBuildClass::test_build_class",
"tests/test_model.py::TestAutoIdentifiers::test_bad_autoid",
"tests/test_model.py::TestAutoIdentifiers::test_int",
"tests/test_model.py::TestAutoIdentifiers::test_int_per_segment",
"tests/test_model.py::TestAutoIdentifiers::test_int_per_type",
"tests/test_model.py::TestAutoIdentifiers::test_no_ident",
"tests/test_model.py::TestAutoIdentifiers::test_prefixes",
"tests/test_model.py::TestAutoIdentifiers::test_uuid",
"tests/test_model.py::TestBaseResource::test_allows_multiple",
"tests/test_model.py::TestBaseResource::test_check_prop",
"tests/test_model.py::TestBaseResource::test_dir",
"tests/test_model.py::TestBaseResource::test_init",
"tests/test_model.py::TestBaseResource::test_init_params",
"tests/test_model.py::TestBaseResource::test_list_all_props",
"tests/test_model.py::TestBaseResource::test_list_my_props",
"tests/test_model.py::TestBaseResource::test_multiplicity",
"tests/test_model.py::TestPropertyCache::test_cache_hierarchy",
"tests/test_model.py::TestMagicMethods::test_set_magic_resource",
"tests/test_model.py::TestMagicMethods::test_set_magic_resource_inverse",
"tests/test_model.py::TestMagicMethods::test_validate_multiplicity",
"tests/test_model.py::TestMagicMethods::test_validate_profile_off",
"tests/test_model.py::TestMagicMethods::test_validation_off",
"tests/test_model.py::TestMagicMethods::test_validation_unknown",
"tests/test_model.py::TestMagicMethods::test_validation_wrong_type"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-06-19 18:40:11+00:00
|
apache-2.0
| 5,870 |
|
thelastpickle__cassandra-medusa-552
|
diff --git a/docs/Configuration.md b/docs/Configuration.md
index 6fa77f1..2f02795 100644
--- a/docs/Configuration.md
+++ b/docs/Configuration.md
@@ -10,9 +10,14 @@ Modify it to match your requirements:
;config_file = <path to cassandra.yaml. Defaults to /etc/cassandra/cassandra.yaml>
;cql_username = <username>
;cql_password = <password>
+; When using the following setting there must be files in:
+; - `<cql_k8s_secrets_path>/username` containing username
+; - `<cql_k8s_secrets_path>/password` containing password
+;cql_k8s_secrets_path = <path to kubernetes secrets folder>
;nodetool_username = <my nodetool username>
;nodetool_password = <my nodetool password>
;nodetool_password_file_path = <path to nodetool password file>
+;nodetool_k8s_secrets_path = <path to nodetool kubernetes secrets folder>
;nodetool_host = <host name or IP to use for nodetool>
;nodetool_port = <port number to use for nodetool>
;certfile= <Client SSL: path to rootCa certificate>
@@ -153,12 +158,14 @@ backup_grace_period_in_days = 10
Some config settings can be overriden through environment variables prefixed with `MEDUSA_`:
-| Setting | Env Variable |
-|------------------------|-------------------------------|
-| `cql_username` | `MEDUSA_CQL_USERNAME` |
-| `cql_password` | `MEDUSA_CQL_PASSWORD` |
-| `nodetool_username` | `MEDUSA_NODETOOL_USERNAME` |
-| `nodetool_password` | `MEDUSA_NODETOOL_PASSWORD` |
-| `sstableloader_tspw` | `MEDUSA_SSTABLELOADER_TSPW` |
-| `sstableloader_kspw` | `MEDUSA_SSTABLELOADER_KSPW` |
-| `resolve_ip_addresses` | `MEDUSA_RESOLVE_IP_ADDRESSES` |
+| Setting | Env Variable |
+|-----------------------------|------------------------------------|
+| `cql_username` | `MEDUSA_CQL_USERNAME` |
+| `cql_password` | `MEDUSA_CQL_PASSWORD` |
+| `cql_k8s_secrets_path` | `MEDUSA_CQL_K8S_SECRETS_PATH` |
+| `nodetool_username` | `MEDUSA_NODETOOL_USERNAME` |
+| `nodetool_password` | `MEDUSA_NODETOOL_PASSWORD` |
+| `nodetool_k8s_secrets_path` | `MEDUSA_NODETOOL_K8S_SECRETS_PATH` |
+| `sstableloader_tspw` | `MEDUSA_SSTABLELOADER_TSPW` |
+| `sstableloader_kspw` | `MEDUSA_SSTABLELOADER_KSPW` |
+| `resolve_ip_addresses` | `MEDUSA_RESOLVE_IP_ADDRESSES` |
diff --git a/medusa-example.ini b/medusa-example.ini
index 715d825..e7a4e2d 100644
--- a/medusa-example.ini
+++ b/medusa-example.ini
@@ -18,9 +18,14 @@
;config_file = <path to cassandra.yaml. Defaults to /etc/cassandra/cassandra.yaml>
;cql_username = <username>
;cql_password = <password>
+; When using the following setting there must be files in:
+; - `<cql_k8s_secrets_path>/username` containing username
+; - `<cql_k8s_secrets_path>/password` containing password
+;cql_k8s_secrets_path = <path to kubernetes secrets folder>
;nodetool_username = <my nodetool username>
;nodetool_password = <my nodetool password>
;nodetool_password_file_path = <path to nodetool password file>
+;nodetool_k8s_secrets_path = <path to nodetool kubernetes secrets folder>
;nodetool_host = <host name or IP to use for nodetool>
;nodetool_port = <port number to use for nodetool>
;certfile= <Client SSL: path to rootCa certificate>
diff --git a/medusa/config.py b/medusa/config.py
index 0ecdfd3..e95ac0a 100644
--- a/medusa/config.py
+++ b/medusa/config.py
@@ -39,7 +39,8 @@ CassandraConfig = collections.namedtuple(
['start_cmd', 'stop_cmd', 'config_file', 'cql_username', 'cql_password', 'check_running', 'is_ccm',
'sstableloader_bin', 'nodetool_username', 'nodetool_password', 'nodetool_password_file_path', 'nodetool_host',
'nodetool_port', 'certfile', 'usercert', 'userkey', 'sstableloader_ts', 'sstableloader_tspw',
- 'sstableloader_ks', 'sstableloader_kspw', 'nodetool_ssl', 'resolve_ip_addresses', 'use_sudo', 'nodetool_flags']
+ 'sstableloader_ks', 'sstableloader_kspw', 'nodetool_ssl', 'resolve_ip_addresses', 'use_sudo', 'nodetool_flags',
+ 'cql_k8s_secrets_path', 'nodetool_k8s_secrets_path']
)
SSHConfig = collections.namedtuple(
@@ -229,12 +230,30 @@ def parse_config(args, config_file):
'nodetool_password',
'sstableloader_tspw',
'sstableloader_kspw',
- 'resolve_ip_addresses'
+ 'resolve_ip_addresses',
+ 'cql_k8s_secrets_path',
+ 'nodetool_k8s_secrets_path'
]:
config_property_upper = "MEDUSA_{}".format(config_property.upper())
if config_property_upper in os.environ:
config.set('cassandra', config_property, os.environ[config_property_upper])
+ if config.has_option('cassandra', 'cql_k8s_secrets_path'):
+ cql_k8s_secrets_path = config.get('cassandra', 'cql_k8s_secrets_path')
+ if cql_k8s_secrets_path:
+ logging.debug('Using cql_k8s_secrets_path (path="{}")'.format(cql_k8s_secrets_path))
+ cql_k8s_username, cql_k8s_password = _load_k8s_secrets(cql_k8s_secrets_path)
+ config.set('cassandra', 'cql_username', cql_k8s_username)
+ config.set('cassandra', 'cql_password', cql_k8s_password)
+
+ if config.has_option('cassandra', 'nodetool_k8s_secrets_path'):
+ nodetool_k8s_secrets_path = config.get('cassandra', 'nodetool_k8s_secrets_path')
+ if nodetool_k8s_secrets_path:
+ logging.debug('Using nodetool_k8s_secrets_path (path="{}")'.format(nodetool_k8s_secrets_path))
+ nodetool_k8s_username, nodetool_k8s_password = _load_k8s_secrets(nodetool_k8s_secrets_path)
+ config.set('cassandra', 'nodetool_username', nodetool_k8s_username)
+ config.set('cassandra', 'nodetool_password', nodetool_k8s_password)
+
resolve_ip_addresses = config['cassandra']['resolve_ip_addresses']
hostname_resolver = HostnameResolver(resolve_ip_addresses, kubernetes_enabled)
if config['storage']['fqdn'] == socket.getfqdn() and not resolve_ip_addresses:
@@ -249,6 +268,27 @@ def parse_config(args, config_file):
return config
+def _load_k8s_secrets(k8s_secrets_path):
+ """Load username and password from files following the k8s secrets convention.
+
+ :param str k8s_secrets_path: folder path containing the secrets
+ :return str, str: username and password contained in files
+ """
+ # By default, username and password are available in path/username and path/password.
+ # They could be in other places if overridden, this is not supported for now. Refs:
+ # https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod
+ # https://kubernetes.io/docs/concepts/configuration/secret/#consuming-secret-values-from-volumes
+ k8s_username_file = os.path.join(k8s_secrets_path, 'username')
+ logging.debug('Loading k8s username from "{}"'.format(k8s_username_file))
+ with open(k8s_username_file, 'r') as f:
+ k8s_username = f.read().strip()
+ k8s_password_file = os.path.join(k8s_secrets_path, 'password')
+ logging.debug('Loading k8s password from "{}"'.format(k8s_password_file))
+ with open(k8s_password_file, 'r') as f:
+ k8s_password = f.read().strip()
+ return k8s_username, k8s_password
+
+
def load_config(args, config_file):
"""Load configuration from a medusa.ini file
|
thelastpickle/cassandra-medusa
|
d73d5b03f558a9ac5d08f18d52c276a685975d1d
|
diff --git a/tests/config_test.py b/tests/config_test.py
index 56acc04..3c4ead6 100644
--- a/tests/config_test.py
+++ b/tests/config_test.py
@@ -18,6 +18,7 @@ import pathlib
import unittest
from unittest.mock import patch
import socket
+import tempfile
import medusa.config
import medusa.utils
@@ -98,6 +99,44 @@ class ConfigTest(unittest.TestCase):
assert config.cassandra.cql_username == 'new_cql_username'
assert config.cassandra.cql_password == 'new_cql_password'
+ def test_cql_k8s_secrets_path_override(self):
+ """
+ Ensure that CQL credentials stored in a path following k8s convention override the default vars.
+ """
+ tmpdir = tempfile.mkdtemp()
+ os.environ['MEDUSA_CQL_K8S_SECRETS_PATH'] = tmpdir
+ # Write k8s_username and k8s_password in /tmpdir/username and /tmpdir/password
+ for k8s_cred in ['username', 'password']:
+ with open(os.path.join(tmpdir, k8s_cred), 'w') as f:
+ f.write('k8s_{}'.format(k8s_cred))
+
+ args = {}
+ config = medusa.config.load_config(args, self.medusa_config_file)
+ assert config.cassandra.cql_username == 'k8s_username'
+ assert config.cassandra.cql_password == 'k8s_password'
+
+ # Cleanup
+ os.environ.pop('MEDUSA_CQL_K8S_SECRETS_PATH', None)
+
+ def test_nodetool_k8s_secrets_path_override(self):
+ """
+ Ensure that nodetool credentials stored in a path following k8s convention override the default vars.
+ """
+ tmpdir = tempfile.mkdtemp()
+ os.environ['MEDUSA_NODETOOL_K8S_SECRETS_PATH'] = tmpdir
+ # Write nodetool_username and nodetool_password in /tmpdir/username and /tmpdir/password
+ for k8s_cred in ['username', 'password']:
+ with open(os.path.join(tmpdir, k8s_cred), 'w') as f:
+ f.write('k8s_{}'.format(k8s_cred))
+
+ args = {}
+ config = medusa.config.load_config(args, self.medusa_config_file)
+ assert config.cassandra.nodetool_username == 'k8s_username'
+ assert config.cassandra.nodetool_password == 'k8s_password'
+
+ # Cleanup
+ os.environ.pop('MEDUSA_NODETOOL_K8S_SECRETS_PATH', None)
+
def test_args_settings_override(self):
"""Ensure that each config file's section settings can be overridden with command line options"""
args = {
|
Allow Medusa to take CQL credentials through a file
The K8ssandra-operator modular secrets backend requires the ability for Medusa to read CQL credentials from files. Currently only environment variables are supported.
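A minimal sketch of reading credentials laid out the Kubernetes way, assuming the secret is mounted as `username` and `password` files under a single directory (this mirrors the `_load_k8s_secrets` helper in the patch above):

```
import os

def load_k8s_secrets(path):
    # k8s mounts each secret key as a separate file: <path>/username and <path>/password
    with open(os.path.join(path, 'username')) as f:
        username = f.read().strip()
    with open(os.path.join(path, 'password')) as f:
        password = f.read().strip()
    return username, password

username, password = load_k8s_secrets(os.environ['MEDUSA_CQL_K8S_SECRETS_PATH'])
```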
|
0.0
|
d73d5b03f558a9ac5d08f18d52c276a685975d1d
|
[
"tests/config_test.py::ConfigTest::test_cql_k8s_secrets_path_override",
"tests/config_test.py::ConfigTest::test_nodetool_k8s_secrets_path_override"
] |
[
"tests/config_test.py::ConfigTest::test_args_settings_override",
"tests/config_test.py::ConfigTest::test_different_auth_env_variables",
"tests/config_test.py::ConfigTest::test_fqdn_with_resolve_ip_addresses_disabled",
"tests/config_test.py::ConfigTest::test_fqdn_with_resolve_ip_addresses_enabled",
"tests/config_test.py::ConfigTest::test_new_env_variables_override_deprecated_ones",
"tests/config_test.py::ConfigTest::test_no_auth_env_variables",
"tests/config_test.py::ConfigTest::test_overridden_fqdn",
"tests/config_test.py::ConfigTest::test_use_sudo_default",
"tests/config_test.py::ConfigTest::test_use_sudo_kubernetes_disabled",
"tests/config_test.py::ConfigTest::test_use_sudo_kubernetes_enabled"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-04 15:35:43+00:00
|
apache-2.0
| 5,871 |
|
thelastpickle__cassandra-medusa-592
|
diff --git a/medusa/service/grpc/restore.py b/medusa/service/grpc/restore.py
index 0d5ccf6..ea6e79e 100644
--- a/medusa/service/grpc/restore.py
+++ b/medusa/service/grpc/restore.py
@@ -23,7 +23,6 @@ from pathlib import Path
import medusa.config
import medusa.restore_node
import medusa.listing
-from medusa.service.grpc.server import RESTORE_MAPPING_LOCATION
def create_config(config_file_path):
@@ -49,43 +48,34 @@ def configure_console_logging(config):
logging.getLogger(logger_name).setLevel(logging.WARN)
-if __name__ == '__main__':
- if len(sys.argv) > 3:
- config_file_path = sys.argv[2]
- restore_key = sys.argv[3]
- else:
- logging.error("Usage: {} <config_file_path> <restore_key>".format(sys.argv[0]))
- sys.exit(1)
-
+def apply_mapping_env():
+ # By default we consider that we're restoring in place.
in_place = True
- if os.path.exists(f"{RESTORE_MAPPING_LOCATION}/{restore_key}"):
- logging.info(f"Reading mapping file {RESTORE_MAPPING_LOCATION}/{restore_key}")
- with open(f"{RESTORE_MAPPING_LOCATION}/{restore_key}", 'r') as f:
- mapping = json.load(f)
- # Mapping json structure will look like:
- # {'in_place': true,
- # 'host_map':
- # {'172.24.0.3': {'source': ['172.24.0.3'], 'seed': False},
- # '127.0.0.1': {'source': ['172.24.0.4'], 'seed': False},
- # '172.24.0.6': {'source': ['172.24.0.6'], 'seed': False}}}
- # As each mapping is specific to a Cassandra node, we're looking for the node that maps to 127.0.0.1,
- # which will be different for each pod.
- # If hostname resolving is turned on, we're looking for the localhost key instead.
+ if "RESTORE_MAPPING" in os.environ.keys():
+ logging.info("Reading restore mapping from environment variable")
+ mapping = json.loads(os.environ["RESTORE_MAPPING"])
+ # Mapping json structure will look like:
+ # {'in_place': true,
+ # 'host_map':
+ # {'test-dc1-sts-0': {'source': ['172.24.0.3'], 'seed': False},
+ # 'test-dc1-sts-1': {'source': ['172.24.0.4'], 'seed': False},
+ # 'test-dc1-sts-2': {'source': ['172.24.0.6'], 'seed': False}}}
+ # As each mapping is specific to a Cassandra node, we're looking for
+ # the node that maps to the value of the POD_NAME var.
+ in_place = mapping["in_place"]
+ if not in_place:
print(f"Mapping: {mapping}")
- if "localhost" in mapping["host_map"].keys():
- os.environ["POD_IP"] = mapping["host_map"]["localhost"]["source"][0]
- elif "127.0.0.1" in mapping["host_map"].keys():
- os.environ["POD_IP"] = mapping["host_map"]["127.0.0.1"]["source"][0]
- elif "::1" in mapping["host_map"].keys():
- os.environ["POD_IP"] = mapping["host_map"]["::1"]["source"][0]
- in_place = mapping["in_place"]
- if not in_place and "POD_IP" not in os.environ.keys():
- print("Could not find target node mapping for this pod while performing remote restore. Exiting.")
- sys.exit(1)
+ # While POD_IP isn't a great name, it's the env variable that is used to enforce the fqdn of the node.
+ # This allows us to specify which node we're restoring from.
+ if os.environ["POD_NAME"] in mapping["host_map"].keys():
+ os.environ["POD_IP"] = mapping["host_map"][os.environ["POD_NAME"]]["source"][0]
+ print(f"Restoring from {os.environ['POD_IP']}")
+ else:
+ return False, f"POD_NAME {os.environ['POD_NAME']} not found in mapping"
+ return in_place, None
- config = create_config(config_file_path)
- configure_console_logging(config.logging)
+def restore_backup(in_place, config):
backup_name = os.environ["BACKUP_NAME"]
tmp_dir = Path("/tmp") if "MEDUSA_TMP_DIR" not in os.environ else Path(os.environ["MEDUSA_TMP_DIR"])
print(f"Downloading backup {backup_name} to {tmp_dir}")
@@ -98,17 +88,33 @@ if __name__ == '__main__':
cluster_backups = list(medusa.listing.get_backups(config, True))
logging.info(f"Found {len(cluster_backups)} backups in the cluster")
- backup_found = False
# Checking if the backup exists for the node we're restoring.
# Skipping restore if it doesn't exist.
for cluster_backup in cluster_backups:
if cluster_backup.name == backup_name:
- backup_found = True
logging.info("Starting restore of backup {}".format(backup_name))
medusa.restore_node.restore_node(config, tmp_dir, backup_name, in_place, keep_auth,
seeds, verify, keyspaces, tables, use_sstableloader)
- logging.info("Finished restore of backup {}".format(backup_name))
- break
+ return f"Finished restore of backup {backup_name}"
+
+ return f"Skipped restore of missing backup {backup_name}"
+
+
+if __name__ == '__main__':
+ if len(sys.argv) > 3:
+ config_file_path = sys.argv[2]
+ restore_key = sys.argv[3]
+ else:
+ logging.error("Usage: {} <config_file_path> <restore_key>".format(sys.argv[0]))
+ sys.exit(1)
+
+ (in_place, error_message) = apply_mapping_env()
+ if error_message:
+ print(error_message)
+ sys.exit(1)
+
+ config = create_config(config_file_path)
+ configure_console_logging(config.logging)
- if not backup_found:
- logging.info("Skipped restore of missing backup {}".format(backup_name))
+ output_message = restore_backup(in_place, config)
+ logging.info(output_message)
diff --git a/medusa/service/grpc/server.py b/medusa/service/grpc/server.py
index 119a31e..9a456a9 100644
--- a/medusa/service/grpc/server.py
+++ b/medusa/service/grpc/server.py
@@ -43,6 +43,7 @@ TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
BACKUP_MODE_DIFFERENTIAL = "differential"
BACKUP_MODE_FULL = "full"
RESTORE_MAPPING_LOCATION = "/var/lib/cassandra/.restore_mapping"
+RESTORE_MAPPING_ENV = "RESTORE_MAPPING"
class Server:
|
thelastpickle/cassandra-medusa
|
ec425ec37f42cb5434644e9b84a353b5f49f3842
|
diff --git a/tests/service/grpc/restore_test.py b/tests/service/grpc/restore_test.py
new file mode 100644
index 0000000..831dcea
--- /dev/null
+++ b/tests/service/grpc/restore_test.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import unittest
+import os
+from unittest.mock import MagicMock, patch
+from pathlib import PosixPath
+
+from medusa.service.grpc.restore import apply_mapping_env, restore_backup
+
+
+class ServiceRestoreTest(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def setUp(self):
+ os.environ.pop('POD_IP', None)
+ os.environ.pop('POD_NAME', None)
+ os.environ.pop('RESTORE_MAPPING', None)
+
+ def test_restore_inplace(self):
+ os.environ['POD_NAME'] = 'test-dc1-sts-0'
+ os.environ['RESTORE_MAPPING'] = '{"in_place": true, "host_map": {' \
+ + '"test-dc1-sts-0": {"source": ["test-dc1-sts-0"], "seed": false},' \
+ + '"test-dc1-sts-1": {"source": ["test-dc1-sts-1"], "seed": false},' \
+ + '"test-dc1-sts-2": {"source": "prod-dc1-sts-2", "seed": false}}}'
+ (in_place, error_message) = apply_mapping_env()
+
+ assert in_place is True
+ assert error_message is None
+ assert "POD_IP" not in os.environ.keys()
+
+ def test_restore_remote(self):
+ os.environ.update({'POD_NAME': 'test-dc1-sts-0'})
+ os.environ['RESTORE_MAPPING'] = '{"in_place": false, "host_map": {' \
+ + '"test-dc1-sts-0": {"source": ["prod-dc1-sts-3"], "seed": false},' \
+ + '"test-dc1-sts-1": {"source": ["prod-dc1-sts-1"], "seed": false},' \
+ + '"test-dc1-sts-2": {"source": "prod-dc1-sts-2", "seed": false}}}'
+ (in_place, error_message) = apply_mapping_env()
+
+ assert in_place is False
+ assert error_message is None
+ assert "POD_IP" in os.environ.keys()
+ assert os.environ['POD_IP'] == 'prod-dc1-sts-3'
+
+ def test_restore_no_match(self):
+ os.environ['POD_NAME'] = 'test-dc1-sts-0'
+ os.environ['RESTORE_MAPPING'] = '{"in_place": false, "host_map": {' \
+ + '"test-dc1-sts-3": {"source": ["prod-dc1-sts-3"], "seed": false},' \
+ + '"test-dc1-sts-1": {"source": ["prod-dc1-sts-1"], "seed": false},' \
+ + '"test-dc1-sts-2": {"source": "prod-dc1-sts-2", "seed": false}}}'
+ (in_place, error_message) = apply_mapping_env()
+
+ assert in_place is False
+ assert error_message is not None
+ assert "POD_IP" not in os.environ.keys()
+
+ def test_success_restore_backup(self):
+ # Define test inputs
+ in_place = True
+ config = {'some': 'config'}
+
+ # Define expected output
+ expected_output = 'Finished restore of backup test_backup'
+
+ # Set up mock environment variables
+ os.environ["BACKUP_NAME"] = "test_backup"
+ os.environ["MEDUSA_TMP_DIR"] = "/tmp"
+
+ # Set up mock for medusa.listing.get_backups()
+ with patch('medusa.listing.get_backups') as mock_get_backups:
+ mock_cluster_backup = MagicMock()
+ mock_cluster_backup.name = "test_backup"
+ mock_get_backups.return_value = [mock_cluster_backup]
+
+ # Set up mock for medusa.restore_node.restore_node()
+ with patch('medusa.restore_node.restore_node') as mock_restore_node:
+ mock_restore_node.return_value = None
+
+ # Call the function
+ result = restore_backup(in_place, config)
+
+ # Assertions
+ assert result == expected_output
+ mock_get_backups.assert_called_once_with(config, True)
+ mock_restore_node.assert_called_once_with(config, PosixPath('/tmp'),
+ 'test_backup', True, False, None, False, {}, {}, False)
+
+ def test_fail_restore_backup(self):
+ # Define test inputs
+ in_place = True
+ config = {'some': 'config'}
+
+ # Define expected output
+ expected_output = 'Skipped restore of missing backup test_backup'
+
+ # Set up mock environment variables
+ os.environ["BACKUP_NAME"] = "test_backup"
+ os.environ["MEDUSA_TMP_DIR"] = "/tmp"
+
+ # Set up mock for medusa.listing.get_backups()
+ with patch('medusa.listing.get_backups') as mock_get_backups:
+ mock_cluster_backup = MagicMock()
+ mock_cluster_backup.name = "test_backup10"
+ mock_get_backups.return_value = [mock_cluster_backup]
+
+ # Set up mock for medusa.restore_node.restore_node()
+ with patch('medusa.restore_node.restore_node') as mock_restore_node:
+ mock_restore_node.return_value = None
+
+ result = restore_backup(in_place, config)
+
+ assert result == expected_output
+ mock_get_backups.assert_called_once_with(config, True)
+ mock_restore_node.assert_not_called()
+
+
+if __name__ == '__main__':
+ unittest.main()
|
Modify k8s restores to use an env variable to store the restore mapping
Currently in `restore.py`, we read the restore mapping from a file using a fairly brittle process.
We need to evolve this into reading the mapping from an env variable, using a mapping generated by k8ssandra-operator instead of medusa itself.
We'll rely on the `POD_NAME` env variable to identify the current node and find the source node for the restore, which will then be used to set the `POD_IP` env variable and enforce the FQDN for the restore operation.
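A minimal sketch of the intended flow, assuming the operator puts the JSON mapping in `RESTORE_MAPPING` (this mirrors `apply_mapping_env` in the patch above):

```
import json
import os

mapping = json.loads(os.environ['RESTORE_MAPPING'])
# e.g. {"in_place": false,
#       "host_map": {"test-dc1-sts-0": {"source": ["prod-dc1-sts-3"], "seed": false}, ...}}
in_place = mapping['in_place']
if not in_place:
    pod_name = os.environ['POD_NAME']
    if pod_name in mapping['host_map']:
        # POD_IP is the variable used to enforce the fqdn of the source node
        os.environ['POD_IP'] = mapping['host_map'][pod_name]['source'][0]
    else:
        raise SystemExit(f'POD_NAME {pod_name} not found in mapping')
```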
|
0.0
|
ec425ec37f42cb5434644e9b84a353b5f49f3842
|
[
"tests/service/grpc/restore_test.py::ServiceRestoreTest::test_fail_restore_backup",
"tests/service/grpc/restore_test.py::ServiceRestoreTest::test_restore_inplace",
"tests/service/grpc/restore_test.py::ServiceRestoreTest::test_restore_no_match",
"tests/service/grpc/restore_test.py::ServiceRestoreTest::test_restore_remote",
"tests/service/grpc/restore_test.py::ServiceRestoreTest::test_success_restore_backup"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-30 18:29:01+00:00
|
apache-2.0
| 5,872 |
|
thelastpickle__cassandra-medusa-597
|
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7ef0044..698d56b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -107,7 +107,7 @@ jobs:
matrix:
#python-version: [3.6]
python-version: [3.6, "3.10"]
- it-backend: [local, s3, gcs, minio, azure]
+ it-backend: [local, s3, gcs, minio, azure, azure-hierarchical]
# IBM not included by default due to lite plan quota being easily exceeded
#it-backend: [local, s3, gcs, minio, ibm, azure]
cassandra-version: [2.2.19, 3.11.11, 4.0.0, 'github:apache/trunk']
@@ -146,7 +146,15 @@ jobs:
cassandra-version: 'github:apache/trunk'
- it-backend: azure
python-version: "3.10"
-
+ - it-backend: azure-hierarchical
+ cassandra-version: 2.2.19
+ - it-backend: azure-hierarchical
+ cassandra-version: 3.11.11
+ - it-backend: azure-hierarchical
+ cassandra-version: 'github:apache/trunk'
+ - it-backend: azure-hierarchical
+ python-version: "3.10"
+
runs-on: ubuntu-20.04
services:
minio:
@@ -180,7 +188,7 @@ jobs:
pip install -r requirements-test.txt
pip install ccm
case '${{ matrix.it-backend }}' in
- 'azure')
+ 'azure'|'azure-hierarchical')
pip install -r requirements-azure.txt
;;
'ibm'|'minio'|'s3')
@@ -198,7 +206,8 @@ jobs:
|| ( "${{ matrix.it-backend }}" == "minio" ) \
|| ( -n '${{ secrets.MEDUSA_GCS_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "gcs" ) \
|| ( -n '${{ secrets.MEDUSA_IBM_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "ibm" ) \
- || ( -n '${{ secrets.MEDUSA_AZURE_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "azure" ) ]];
+ || ( -n '${{ secrets.MEDUSA_AZURE_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "azure" ) \
+ || ( -n '${{ secrets.MEDUSA_AZURE_HIERARCHICAL_CREDENTIALS }}' && "${{ matrix.it-backend }}" == "azure-hierarchical" ) ]];
then
echo "IT_CAN_RUN=yes" >> $GITHUB_ENV
else
@@ -263,6 +272,11 @@ jobs:
# Azure Blob Storage tests
printf "%s" '${{ secrets.MEDUSA_AZURE_CREDENTIALS }}' > ~/medusa_azure_credentials.json
./run_integration_tests.sh -v --azure --no-local --cassandra-version=${{ matrix.cassandra-version }}
+ elif [ "${{ matrix.it-backend }}" == "azure-hierarchical" ]
+ then
+ # Azure Blob Storage with hierarchical namespace tests
+ printf "%s" '${{ secrets.MEDUSA_AZURE_HIERARCHICAL_CREDENTIALS }}' > ~/medusa_azure_credentials.json
+ ./run_integration_tests.sh -v --azure --no-local --cassandra-version=${{ matrix.cassandra-version }}
else
# Local storage tests
./run_integration_tests.sh -v --cassandra-version=${{ matrix.cassandra-version }}
diff --git a/medusa/storage/__init__.py b/medusa/storage/__init__.py
index 9485aa2..30173d4 100644
--- a/medusa/storage/__init__.py
+++ b/medusa/storage/__init__.py
@@ -281,7 +281,8 @@ class Storage(object):
def group_backup_index_by_backup_and_node(self, backup_index_blobs):
def get_backup_name(blob):
- return blob.name.split('/')[2] if len(str(self.prefix_path)) <= 1 else blob.name.split('/')[3]
+ blob_name_chunks = blob.name.split('/')
+ return blob_name_chunks[2] if len(str(self.prefix_path)) <= 1 else blob_name_chunks[3]
def name_and_fqdn(blob):
return get_backup_name(blob), Storage.get_fqdn_from_any_index_blob(blob)
@@ -292,9 +293,20 @@ class Storage(object):
def group_by_fqdn(blobs):
return itertools.groupby(blobs, Storage.get_fqdn_from_any_index_blob)
+ def has_proper_name(blob):
+ blob_name_chunks = blob.name.split('/')
+ is_proper = len(blob_name_chunks) == 4 if len(str(self.prefix_path)) <= 1 else len(blob_name_chunks) == 5
+ if not is_proper:
+ logging.warning('File {} in backup index has improper name'.format(blob.name))
+ return is_proper
+
blobs_by_backup = {}
+ properly_named_index_blobs = filter(
+ has_proper_name,
+ backup_index_blobs
+ )
sorted_backup_index_blobs = sorted(
- backup_index_blobs,
+ properly_named_index_blobs,
key=name_and_fqdn
)
diff --git a/medusa/storage/abstract_storage.py b/medusa/storage/abstract_storage.py
index 61c5436..bd887c0 100644
--- a/medusa/storage/abstract_storage.py
+++ b/medusa/storage/abstract_storage.py
@@ -54,6 +54,8 @@ class AbstractStorage(abc.ABC):
else:
objects = self.driver.list_container_objects(self.bucket, ex_prefix=str(path))
+ objects = list(filter(lambda blob: blob.size > 0, objects))
+
return objects
@retry(stop_max_attempt_number=7, wait_exponential_multiplier=10000, wait_exponential_max=120000)
diff --git a/medusa/storage/local_storage.py b/medusa/storage/local_storage.py
index 0759d27..57da294 100644
--- a/medusa/storage/local_storage.py
+++ b/medusa/storage/local_storage.py
@@ -41,6 +41,8 @@ class LocalStorage(AbstractStorage):
if path is not None:
objects = list(filter(lambda blob: blob.name.startswith(path), objects))
+ objects = list(filter(lambda blob: blob.size > 0, objects))
+
return objects
def get_object_datetime(self, blob):
|
thelastpickle/cassandra-medusa
|
9d1060268f9512a5a6215f4ca98bdc44cc456b42
|
diff --git a/tests/storage_test.py b/tests/storage_test.py
index c7c0ded..94307d8 100644
--- a/tests/storage_test.py
+++ b/tests/storage_test.py
@@ -92,8 +92,10 @@ class RestoreNodeTest(unittest.TestCase):
def test_list_objects(self):
file1_content = "content of the test file1"
file2_content = "content of the test file2"
+ file3_content = ""
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
+ self.storage.storage_driver.upload_blob_from_string("test_download_blobs3/file3.txt", file3_content)
objects = self.storage.storage_driver.list_objects()
self.assertEqual(len(objects), 2)
one_object = self.storage.storage_driver.list_objects("test_download_blobs2")
@@ -284,6 +286,34 @@ class RestoreNodeTest(unittest.TestCase):
self.assertTrue("node1" in blobs_by_backup["backup2"])
self.assertFalse("node2" in blobs_by_backup["backup2"])
+ def test_parse_backup_index_with_wrong_names(self):
+ file_content = "content of the test file"
+ prefix_path = self.storage.prefix_path
+
+ # Index files for a backup
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/backup3/tokenmap_node1.json".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/backup3/schema_node1.cql".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/backup3/started_node1_1689598370.timestamp".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/backup3/finished_node1_1689598370.timestamp".format(prefix_path), file_content)
+ # Files that we want to see filtered out
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/extra_folder/backup3/tokenmap_node2.json".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/missing_folder/tokenmap_node2.json".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/missing_file".format(prefix_path), file_content)
+
+ path = '{}index/backup_index'.format(prefix_path)
+ backup_index = self.storage.storage_driver.list_objects(path)
+ blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(backup_index)
+ self.assertEqual(1, len(blobs_by_backup.keys()))
+ self.assertEqual(1, len(blobs_by_backup['backup3'].keys()))
+ self.assertEqual(4, len(blobs_by_backup['backup3']['node1']))
+
def test_remove_extension(self):
self.assertEqual(
'localhost',
diff --git a/tests/storage_test_with_prefix.py b/tests/storage_test_with_prefix.py
index 5bf5f73..618548a 100644
--- a/tests/storage_test_with_prefix.py
+++ b/tests/storage_test_with_prefix.py
@@ -93,8 +93,10 @@ class RestoreNodeTest(unittest.TestCase):
def test_list_objects(self):
file1_content = "content of the test file1"
file2_content = "content of the test file2"
+ file3_content = ""
self.storage.storage_driver.upload_blob_from_string("test_download_blobs1/file1.txt", file1_content)
self.storage.storage_driver.upload_blob_from_string("test_download_blobs2/file2.txt", file2_content)
+ self.storage.storage_driver.upload_blob_from_string("test_download_blobs3/file3.txt", file3_content)
objects = self.storage.storage_driver.list_objects()
self.assertEqual(len(objects), 2)
one_object = self.storage.storage_driver.list_objects("test_download_blobs2")
@@ -286,6 +288,34 @@ class RestoreNodeTest(unittest.TestCase):
self.assertTrue("node1" in blobs_by_backup["backup2"])
self.assertFalse("node2" in blobs_by_backup["backup2"])
+ def test_parse_backup_index_with_wrong_names(self):
+ file_content = "content of the test file"
+ prefix_path = self.storage.prefix_path
+
+ # Index files for a backup
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/backup3/tokenmap_node1.json".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/backup3/schema_node1.cql".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/backup3/started_node1_1689598370.timestamp".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/backup3/finished_node1_1689598370.timestamp".format(prefix_path), file_content)
+ # Files that we want to see filtered out
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/extra_folder/backup3/tokenmap_node2.json".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/missing_folder/tokenmap_node2.json".format(prefix_path), file_content)
+ self.storage.storage_driver.upload_blob_from_string(
+ "{}index/backup_index/missing_file".format(prefix_path), file_content)
+
+ path = '{}index/backup_index'.format(prefix_path)
+ backup_index = self.storage.storage_driver.list_objects(path)
+ blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(backup_index)
+ self.assertEqual(1, len(blobs_by_backup.keys()))
+ self.assertEqual(1, len(blobs_by_backup['backup3'].keys()))
+ self.assertEqual(4, len(blobs_by_backup['backup3']['node1']))
+
def test_remove_extension(self):
self.assertEqual(
'localhost',
|
Listing backups fails on some installations against Azure
[Project board link](https://github.com/orgs/k8ssandra/projects/8/views/1?pane=issue&itemId=32514135)
While we couldn't reproduce the issue, we're seeing installations where backups are taken correctly but listing them fails with the following error:
```
Traceback (most recent call last):
File "/usr/local/bin/medusa", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/click/decorators.py", line 84, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/medusa/medusacli.py", line 185, in list_backups
medusa.listing.list_backups(medusaconfig, show_all)
File "/usr/local/lib/python3.6/site-packages/medusa/listing.py", line 41, in list_backups
cluster_backups = get_backups(config, show_all)
File "/usr/local/lib/python3.6/site-packages/medusa/listing.py", line 29, in get_backups
key=lambda b: b.started
File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 355, in list_cluster_backups
key=lambda b: (b.name, b.started)
File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 190, in list_node_backups
blobs_by_backup = self.group_backup_index_by_backup_and_node(backup_index_blobs)
File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 299, in group_backup_index_by_backup_and_node
key=name_and_fqdn
File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 288, in name_and_fqdn
return get_backup_name(blob), Storage.get_fqdn_from_any_index_blob(blob)
File "/usr/local/lib/python3.6/site-packages/medusa/storage/__init__.py", line 285, in get_backup_name
return blob.name.split('/')[2] if len(str(self.prefix_path)) <= 1 else blob.name.split('/')[3]
IndexError: list index out of range
```
After adding some debugging outputs, we see that the folders are returned when listing backups, giving a hierarchical view of the blobs instead of a flat one:
```
[2023-06-29 20:29:25,480] DEBUG: Loading storage_provider: azure_blobs
[2023-06-29 20:29:26,966] DEBUG: [Storage] Listing objects in index/backup_index
[2023-06-29 20:29:26,991] DEBUG: Found backup index blob: index/backup_index
[2023-06-29 20:29:26,991] DEBUG: Found backup index blob: index/backup_index/cassandra_backup_20230628
[2023-06-29 20:29:26,991] DEBUG: Found backup index blob: index/backup_index/cassandra_backup_20230628/differential....
...
```
This is unexpected and not handled correctly in the code.
We could filter out the folders when listing blobs, which should be possible thanks to their 0 byte size.
Alternatively, we could make the `get_backup_name()` method more resilient by checking the number of path segments before indexing into the split array; if the expected index is missing, we can log a warning and skip the file without erroring (see the sketch after the checklist below).
## Definition of Done
- [ ] When listing blobs in the list-backups command, empty files are filtered out
- [ ] `get_backup_name()` will detect blobs that aren't named as expected and ignore them
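A minimal sketch of both checks, assuming blobs expose `name` and `size` attributes as in the storage drivers (this mirrors the filtering and `has_proper_name` logic in the patch above):

```
import logging

def is_real_file(blob):
    # folder placeholders returned by hierarchical-namespace accounts have size 0
    return blob.size > 0

def get_backup_name(blob, prefix_path=''):
    chunks = blob.name.split('/')
    expected = 4 if len(str(prefix_path)) <= 1 else 5
    if len(chunks) != expected:
        logging.warning('File %s in backup index has improper name', blob.name)
        return None  # caller skips this blob instead of raising IndexError
    return chunks[2] if len(str(prefix_path)) <= 1 else chunks[3]
```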
|
0.0
|
9d1060268f9512a5a6215f4ca98bdc44cc456b42
|
[
"tests/storage_test.py::RestoreNodeTest::test_list_objects",
"tests/storage_test.py::RestoreNodeTest::test_parse_backup_index_with_wrong_names",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_list_objects",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_parse_backup_index_with_wrong_names"
] |
[
"tests/storage_test.py::RestoreNodeTest::test_add_object_from_string",
"tests/storage_test.py::RestoreNodeTest::test_download_blobs",
"tests/storage_test.py::RestoreNodeTest::test_generate_md5_hash",
"tests/storage_test.py::RestoreNodeTest::test_get_blob",
"tests/storage_test.py::RestoreNodeTest::test_get_fqdn_from_any_index_blob",
"tests/storage_test.py::RestoreNodeTest::test_get_fqdn_from_backup_index_blob",
"tests/storage_test.py::RestoreNodeTest::test_get_object_datetime",
"tests/storage_test.py::RestoreNodeTest::test_get_timestamp_from_blob_name",
"tests/storage_test.py::RestoreNodeTest::test_hashes_match",
"tests/storage_test.py::RestoreNodeTest::test_parse_backup_index",
"tests/storage_test.py::RestoreNodeTest::test_read_blob",
"tests/storage_test.py::RestoreNodeTest::test_read_blob_as_bytes",
"tests/storage_test.py::RestoreNodeTest::test_remove_extension",
"tests/storage_test.py::RestoreNodeTest::test_verify_hash",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_add_object_from_string",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_download_blobs",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_blob",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_fqdn_from_any_index_blob",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_fqdn_from_backup_index_blob",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_object_datetime",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_get_timestamp_from_blob_name",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_hashes_match",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_parse_backup_index",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_parse_backup_index_common_prefix",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_read_blob",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_read_blob_as_bytes",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_remove_extension",
"tests/storage_test_with_prefix.py::RestoreNodeTest::test_verify_hash"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-14 11:25:50+00:00
|
apache-2.0
| 5,873 |
|
thelastpickle__cassandra-medusa-649
|
diff --git a/medusa/storage/s3_base_storage.py b/medusa/storage/s3_base_storage.py
index 5fc5e77..6819dce 100644
--- a/medusa/storage/s3_base_storage.py
+++ b/medusa/storage/s3_base_storage.py
@@ -99,55 +99,40 @@ class S3BaseStorage(AbstractStorage):
def __init__(self, config):
- if config.kms_id:
+ self.kms_id = None
+ if config.kms_id is not None:
logging.debug("Using KMS key {}".format(config.kms_id))
+ self.kms_id = config.kms_id
self.credentials = self._consolidate_credentials(config)
-
logging.info('Using credentials {}'.format(self.credentials))
self.bucket_name: str = config.bucket_name
- self.config = config
-
- super().__init__(config)
- def connect(self):
-
- if self.config.storage_provider != 's3_compatible':
- # assuming we're dealing with regular aws
- s3_url = "https://{}.s3.amazonaws.com".format(self.bucket_name)
- else:
- # we're dealing with a custom s3 compatible storage, so we need to craft the URL
- protocol = 'https' if self.config.secure.lower() == 'true' else 'http'
- port = '' if self.config.port is None else str(self.config.port)
- s3_url = '{}://{}:{}'.format(protocol, self.config.host, port)
+ self.storage_provider = config.storage_provider
- logging.info('Using S3 URL {}'.format(s3_url))
+ self.connection_extra_args = self._make_connection_arguments(config)
+ self.transfer_config = self._make_transfer_config(config)
- logging.debug('Connecting to S3')
- extra_args = {}
- if self.config.storage_provider == 's3_compatible':
- extra_args['endpoint_url'] = s3_url
- extra_args['verify'] = False
+ super().__init__(config)
+ def connect(self):
+ logging.info(
+ 'Connecting to {} with args {}'.format(
+ self.storage_provider, self.connection_extra_args
+ )
+ )
boto_config = Config(
region_name=self.credentials.region,
signature_version='v4',
tcp_keepalive=True
)
-
- self.trasnfer_config = TransferConfig(
- # we hard-code this one because the parallelism is for now applied to chunking the files
- max_concurrency=4,
- max_bandwidth=AbstractStorage._human_size_to_bytes(self.config.transfer_max_bandwidth),
- )
-
self.s3_client = boto3.client(
's3',
config=boto_config,
aws_access_key_id=self.credentials.access_key_id,
aws_secret_access_key=self.credentials.secret_access_key,
- **extra_args
+ **self.connection_extra_args
)
def disconnect(self):
@@ -157,6 +142,39 @@ class S3BaseStorage(AbstractStorage):
except Exception as e:
logging.error('Error disconnecting from S3: {}'.format(e))
+ def _make_connection_arguments(self, config) -> t.Dict[str, str]:
+
+ secure = config.secure or 'True'
+ host = config.host
+ port = config.port
+
+ if self.storage_provider != 's3_compatible':
+ # when we're dealing with regular AWS, we don't need anything extra
+ return {}
+ else:
+ # we're dealing with a custom s3 compatible storage, so we need to craft the URL
+ protocol = 'https' if secure.lower() == 'true' else 'http'
+ port = '' if port is None else str(port)
+ s3_url = '{}://{}:{}'.format(protocol, host, port)
+ return {
+ 'endpoint_url': s3_url,
+ 'verify': protocol == 'https'
+ }
+
+ def _make_transfer_config(self, config):
+
+ transfer_max_bandwidth = config.transfer_max_bandwidth or None
+
+ # we hard-code this one because the parallelism is for now applied to chunking the files
+ transfer_config = {
+ 'max_concurrency': 4
+ }
+
+ if transfer_max_bandwidth is not None:
+ transfer_config['max_bandwidth'] = AbstractStorage._human_size_to_bytes(transfer_max_bandwidth)
+
+ return TransferConfig(**transfer_config)
+
@staticmethod
def _consolidate_credentials(config) -> CensoredCredentials:
@@ -206,13 +224,13 @@ class S3BaseStorage(AbstractStorage):
async def _upload_object(self, data: io.BytesIO, object_key: str, headers: t.Dict[str, str]) -> AbstractBlob:
kms_args = {}
- if self.config.kms_id is not None:
+ if self.kms_id is not None:
kms_args['ServerSideEncryption'] = 'aws:kms'
- kms_args['SSEKMSKeyId'] = self.config.kms_id
+ kms_args['SSEKMSKeyId'] = self.kms_id
logging.debug(
'[S3 Storage] Uploading object from stream -> s3://{}/{}'.format(
- self.config.bucket_name, object_key
+ self.bucket_name, object_key
)
)
@@ -220,7 +238,7 @@ class S3BaseStorage(AbstractStorage):
# not passing in the transfer config because that is meant to cap a throughput
# here we are uploading a small-ish file so no need to cap
self.s3_client.put_object(
- Bucket=self.config.bucket_name,
+ Bucket=self.bucket_name,
Key=object_key,
Body=data,
**kms_args,
@@ -248,24 +266,24 @@ class S3BaseStorage(AbstractStorage):
# print also object size
logging.debug(
'[S3 Storage] Downloading {} -> {}/{}'.format(
- object_key, self.config.bucket_name, object_key
+ object_key, self.bucket_name, object_key
)
)
try:
self.s3_client.download_file(
- Bucket=self.config.bucket_name,
+ Bucket=self.bucket_name,
Key=object_key,
Filename=file_path,
- Config=self.trasnfer_config,
+ Config=self.transfer_config,
)
except Exception as e:
- logging.error('Error downloading file from s3://{}/{}: {}'.format(self.config.bucket_name, object_key, e))
+ logging.error('Error downloading file from s3://{}/{}: {}'.format(self.bucket_name, object_key, e))
raise ObjectDoesNotExistError('Object {} does not exist'.format(object_key))
async def _stat_blob(self, object_key: str) -> AbstractBlob:
try:
- resp = self.s3_client.head_object(Bucket=self.config.bucket_name, Key=object_key)
+ resp = self.s3_client.head_object(Bucket=self.bucket_name, Key=object_key)
item_hash = resp['ETag'].replace('"', '')
return AbstractBlob(object_key, int(resp['ContentLength']), item_hash, resp['LastModified'])
except ClientError as e:
@@ -275,7 +293,7 @@ class S3BaseStorage(AbstractStorage):
else:
# Handle other exceptions if needed
logging.error("An error occurred:", e)
- logging.error('Error getting object from s3://{}/{}'.format(self.config.bucket_name, object_key))
+ logging.error('Error getting object from s3://{}/{}'.format(self.bucket_name, object_key))
@retry(stop_max_attempt_number=MAX_UP_DOWN_LOAD_RETRIES, wait_fixed=5000)
async def _upload_blob(self, src: str, dest: str) -> ManifestObject:
@@ -290,9 +308,9 @@ class S3BaseStorage(AbstractStorage):
)
kms_args = {}
- if self.config.kms_id is not None:
+ if self.kms_id is not None:
kms_args['ServerSideEncryption'] = 'aws:kms'
- kms_args['SSEKMSKeyId'] = self.config.kms_id
+ kms_args['SSEKMSKeyId'] = self.kms_id
file_size = os.stat(src).st_size
logging.debug(
@@ -305,7 +323,7 @@ class S3BaseStorage(AbstractStorage):
Filename=src,
Bucket=self.bucket_name,
Key=object_key,
- Config=self.trasnfer_config,
+ Config=self.transfer_config,
ExtraArgs=kms_args,
)
@@ -322,12 +340,12 @@ class S3BaseStorage(AbstractStorage):
async def _delete_object(self, obj: AbstractBlob):
self.s3_client.delete_object(
- Bucket=self.config.bucket_name,
+ Bucket=self.bucket_name,
Key=obj.name
)
async def _get_blob_metadata(self, blob_key: str) -> AbstractBlobMetadata:
- resp = self.s3_client.head_object(Bucket=self.config.bucket_name, Key=blob_key)
+ resp = self.s3_client.head_object(Bucket=self.bucket_name, Key=blob_key)
# the headers come as some non-default dict, so we need to re-package them
blob_metadata = resp.get('ResponseMetadata', {}).get('HTTPHeaders', {})
|
thelastpickle/cassandra-medusa
|
951b127b7299a2ddeadb0e57567917cb6421db90
|
diff --git a/tests/storage/s3_storage_test.py b/tests/storage/s3_storage_test.py
index 79af5b2..db889e5 100644
--- a/tests/storage/s3_storage_test.py
+++ b/tests/storage/s3_storage_test.py
@@ -186,6 +186,93 @@ class S3StorageTest(unittest.TestCase):
# default AWS region
self.assertEqual('us-east-1', credentials.region)
+ def test_make_s3_url(self):
+ with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()):
+ with tempfile.NamedTemporaryFile() as empty_file:
+ config = AttributeDict({
+ 'storage_provider': 's3_us_west_oregon',
+ 'region': 'default',
+ 'key_file': empty_file.name,
+ 'api_profile': None,
+ 'kms_id': None,
+ 'transfer_max_bandwidth': None,
+ 'bucket_name': 'whatever-bucket',
+ 'secure': 'True',
+ 'host': None,
+ 'port': None,
+ })
+ s3_storage = S3BaseStorage(config)
+ # there are no extra connection args when connecting to regular S3
+ self.assertEqual(
+ dict(),
+ s3_storage.connection_extra_args
+ )
+
+ def test_make_s3_url_without_secure(self):
+ with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()):
+ with tempfile.NamedTemporaryFile() as empty_file:
+ config = AttributeDict({
+ 'storage_provider': 's3_us_west_oregon',
+ 'region': 'default',
+ 'key_file': empty_file.name,
+ 'api_profile': None,
+ 'kms_id': None,
+ 'transfer_max_bandwidth': None,
+ 'bucket_name': 'whatever-bucket',
+ 'secure': 'False',
+ 'host': None,
+ 'port': None,
+ })
+ s3_storage = S3BaseStorage(config)
+ # again, no extra connection args when connecting to regular S3
+ # we can't even disable HTTPS
+ self.assertEqual(
+ dict(),
+ s3_storage.connection_extra_args
+ )
+
+ def test_make_s3_compatible_url(self):
+ with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()):
+ with tempfile.NamedTemporaryFile() as empty_file:
+ config = AttributeDict({
+ 'storage_provider': 's3_compatible',
+ 'region': 'default',
+ 'key_file': empty_file.name,
+ 'api_profile': None,
+ 'kms_id': None,
+ 'transfer_max_bandwidth': None,
+ 'bucket_name': 'whatever-bucket',
+ 'secure': 'True',
+ 'host': 's3.example.com',
+ 'port': '443',
+ })
+ s3_storage = S3BaseStorage(config)
+ self.assertEqual(
+ 'https://s3.example.com:443',
+ s3_storage.connection_extra_args['endpoint_url']
+ )
+
+ def test_make_s3_compatible_url_without_secure(self):
+ with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()):
+ with tempfile.NamedTemporaryFile() as empty_file:
+ config = AttributeDict({
+ 'storage_provider': 's3_compatible',
+ 'region': 'default',
+ 'key_file': empty_file.name,
+ 'api_profile': None,
+ 'kms_id': None,
+ 'transfer_max_bandwidth': None,
+ 'bucket_name': 'whatever-bucket',
+ 'secure': 'False',
+ 'host': 's3.example.com',
+ 'port': '8080',
+ })
+ s3_storage = S3BaseStorage(config)
+ self.assertEqual(
+ 'http://s3.example.com:8080',
+ s3_storage.connection_extra_args['endpoint_url']
+ )
+
def _make_instance_metadata_mock():
# mock a call to the metadata service
|
Refactoring: s3 base storage - move url creation to a method
[Project board link](https://github.com/orgs/k8ssandra/projects/8/views/1?pane=issue&itemId=36417073)
The constructor of `s3_base_storage` is too big. One improvement is to move the S3 API URL creation out into a separate method (a sketch follows the checklist below).
## Definition of done
- [ ] S3 API URL creation is extracted into a separate method.
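A minimal sketch of the extracted method, assuming the same config fields as the patch above (`storage_provider`, `host`, `port`, `secure`):

```
def make_connection_arguments(storage_provider, host, port, secure='True'):
    if storage_provider != 's3_compatible':
        # regular AWS needs no endpoint override
        return {}
    # custom s3-compatible storage: craft the endpoint URL ourselves
    protocol = 'https' if secure.lower() == 'true' else 'http'
    s3_url = '{}://{}:{}'.format(protocol, host, '' if port is None else str(port))
    return {'endpoint_url': s3_url, 'verify': protocol == 'https'}
```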
|
0.0
|
951b127b7299a2ddeadb0e57567917cb6421db90
|
[
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url_without_secure",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url_without_secure"
] |
[
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_env_without_profile",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_everything",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_file",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_metadata",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region_and_s3_compatible_storage",
"tests/storage/s3_storage_test.py::S3StorageTest::test_legacy_provider_region_replacement"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-21 10:55:50+00:00
|
apache-2.0
| 5,874 |
|
thelastpickle__cassandra-medusa-685
|
diff --git a/docs/azure_blobs_setup.md b/docs/azure_blobs_setup.md
index 135b7c5..447dbc7 100644
--- a/docs/azure_blobs_setup.md
+++ b/docs/azure_blobs_setup.md
@@ -11,15 +11,7 @@ Create a new storage account or use an existing one which will be used to store
"key": "YOUR_KEY"
}
```
-If you need to set a different host for Azure (for example the host for Azure Gov is `<storageAccount>.blob.core.usgovcloudapi.net`), please ADDITIONALLY set these two fields in the JSON file (the connection string can be found with the key):
-
-```
-"host": "YOUR_HOST"
-"connection_string": "YOUR_CONNECTION_STRING"
-
-```
-
-Place this file on all Apache Cassandra™ nodes running medusa under `/etc/medusa/`and set the rigths appropriately so that onyl users running Medusa can read/modify it.
+Place this file on all Apache Cassandra™ nodes running medusa under `/etc/medusa/`and set the rights appropriately so that only users running Medusa can read/modify it.
### Create a container
@@ -36,3 +28,8 @@ key_file = /etc/medusa/medusa-azure-credentials
Medusa should now be able to access the bucket and perform all required operations.
+If you need to set a different host for Azure (for example the host for Azure Gov is `<storageAccount>.blob.core.usgovcloudapi.net`), please use the `host` parameter in the `[storage]` section of `/etc/medusa/medusa.ini`:
+
+```
+"host": "usgovcloudapi.net"
+```
diff --git a/medusa/storage/azure_storage.py b/medusa/storage/azure_storage.py
index 9faee5a..9d25b79 100644
--- a/medusa/storage/azure_storage.py
+++ b/medusa/storage/azure_storage.py
@@ -49,15 +49,25 @@ class AzureStorage(AbstractStorage):
self.account_name = self.credentials.named_key.name
self.bucket_name = config.bucket_name
+ self.azure_blob_service_url = self._make_blob_service_url(self.account_name, config)
+
# disable chatty loggers
logging.getLogger('azure.core.pipeline.policies.http_logging_policy').setLevel(logging.WARNING)
logging.getLogger('chardet.universaldetector').setLevel(logging.WARNING)
super().__init__(config)
+ def _make_blob_service_url(self, account_name, config):
+ domain = 'windows.net' if config.host is None else config.host
+ if config.port is None:
+ url = f"https://{account_name}.blob.core.{domain}/"
+ else:
+ url = f"https://{account_name}.blob.core.{domain}:{config.port}/"
+ return url
+
def connect(self):
self.azure_blob_service = BlobServiceClient(
- account_url=f"https://{self.account_name}.blob.core.windows.net/",
+ account_url=self.azure_blob_service_url,
credential=self.credentials
)
self.azure_container_client = self.azure_blob_service.get_container_client(self.bucket_name)
|
thelastpickle/cassandra-medusa
|
dca04bc05ead2998241301be0084680b12e0502b
|
diff --git a/tests/storage/abstract_storage_test.py b/tests/storage/abstract_storage_test.py
index 6051142..5662483 100644
--- a/tests/storage/abstract_storage_test.py
+++ b/tests/storage/abstract_storage_test.py
@@ -18,7 +18,13 @@ import unittest
from medusa.storage.abstract_storage import AbstractStorage
-class S3StorageTest(unittest.TestCase):
+class AttributeDict(dict):
+ __slots__ = ()
+ __getattr__ = dict.__getitem__
+ __setattr__ = dict.__setitem__
+
+
+class AbstractStorageTest(unittest.TestCase):
def test_convert_human_friendly_size_to_bytes(self):
self.assertEqual(50, AbstractStorage._human_size_to_bytes('50B'))
diff --git a/tests/storage/azure_storage_test.py b/tests/storage/azure_storage_test.py
new file mode 100644
index 0000000..7809eac
--- /dev/null
+++ b/tests/storage/azure_storage_test.py
@@ -0,0 +1,79 @@
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import tempfile
+import unittest
+
+from medusa.storage.azure_storage import AzureStorage
+from tests.storage.abstract_storage_test import AttributeDict
+
+
+class AzureStorageTest(unittest.TestCase):
+
+ credentials_file_content = """
+ {
+ "storage_account": "medusa-unit-test",
+ "key": "randomString=="
+ }
+ """
+
+ def test_make_connection_url(self):
+ with tempfile.NamedTemporaryFile() as credentials_file:
+ credentials_file.write(self.credentials_file_content.encode())
+ credentials_file.flush()
+ config = AttributeDict({
+ 'region': 'region-from-config',
+ 'storage_provider': 'azure_blobs',
+ 'key_file': credentials_file.name,
+ 'bucket_name': 'bucket-from-config',
+ 'concurrent_transfers': '1',
+ 'host': None,
+ 'port': None,
+ })
+ azure_storage = AzureStorage(config)
+ self.assertEqual(
+ 'https://medusa-unit-test.blob.core.windows.net/',
+ azure_storage.azure_blob_service_url
+ )
+
+ def test_make_connection_url_with_custom_host(self):
+ with tempfile.NamedTemporaryFile() as credentials_file:
+ credentials_file.write(self.credentials_file_content.encode())
+ credentials_file.flush()
+ config = AttributeDict({
+ 'region': 'region-from-config',
+ 'storage_provider': 'azure_blobs',
+ 'key_file': credentials_file.name,
+ 'bucket_name': 'bucket-from-config',
+ 'concurrent_transfers': '1',
+ 'host': 'custom.host.net',
+ 'port': None,
+ })
+ azure_storage = AzureStorage(config)
+ self.assertEqual(
+ 'https://medusa-unit-test.blob.core.custom.host.net/',
+ azure_storage.azure_blob_service_url
+ )
+
+ def test_make_connection_url_with_custom_host_port(self):
+ with tempfile.NamedTemporaryFile() as credentials_file:
+ credentials_file.write(self.credentials_file_content.encode())
+ credentials_file.flush()
+ config = AttributeDict({
+ 'region': 'region-from-config',
+ 'storage_provider': 'azure_blobs',
+ 'key_file': credentials_file.name,
+ 'bucket_name': 'bucket-from-config',
+ 'concurrent_transfers': '1',
+ 'host': 'custom.host.net',
+ 'port': 123,
+ })
+ azure_storage = AzureStorage(config)
+ self.assertEqual(
+ 'https://medusa-unit-test.blob.core.custom.host.net:123/',
+ azure_storage.azure_blob_service_url
+ )
diff --git a/tests/storage/google_storage_test.py b/tests/storage/google_storage_test.py
index 53bfd6d..891b49f 100644
--- a/tests/storage/google_storage_test.py
+++ b/tests/storage/google_storage_test.py
@@ -22,7 +22,7 @@ from pathlib import Path
from medusa.storage.google_storage import _group_by_parent, _is_in_folder
-class RestoreNodeTest(unittest.TestCase):
+class GoogleStorageTest(unittest.TestCase):
def test_is_in_folder(self):
folder = Path('foo/bar')
diff --git a/tests/storage/s3_storage_test.py b/tests/storage/s3_storage_test.py
index 08e98c6..11a5daa 100644
--- a/tests/storage/s3_storage_test.py
+++ b/tests/storage/s3_storage_test.py
@@ -21,12 +21,7 @@ import tempfile
from unittest.mock import patch, MagicMock
from medusa.storage.s3_base_storage import S3BaseStorage
-
-
-class AttributeDict(dict):
- __slots__ = ()
- __getattr__ = dict.__getitem__
- __setattr__ = dict.__setitem__
+from tests.storage.abstract_storage_test import AttributeDict
class S3StorageTest(unittest.TestCase):
|
Azure Storage Support for different Cloud Contexts (e.g. AzureUSGovernment)
[Project board link](https://github.com/orgs/k8ssandra/projects/8/views/1?pane=issue&itemId=43950753)
## Overview
Azure Storage appears to be configured for the Commercial cloud only (_windows.net_), hard-coded at https://github.com/thelastpickle/cassandra-medusa/blob/master/medusa/storage/azure_storage.py#L58.
In AzureUSGovernment cloud, the URLs end with `usgovcloudapi.net`.
The Azure SDK allows the appropriate cloud context to be set, and this was possible with the original azure-cli storage implementation (`< 0.16.0`).
## The Ask
The connection string for Azure Blob Storage should support a conditional so that it can *either* be the current `blob.core.windows.net` for "Azure" cloud or `blob.core.usgovcloudapi.net` for "AzureUSGovernment" cloud.
This should be configurable in the medusa configuration file, rather than hard-coded in the above-referenced file (a sketch follows the references below).
_I'd code this up and offer a patch, but I don't immediately have the time, and hope that the maintainers and existing contributors can tackle it faster than I can._
## Additional Azure References
https://learn.microsoft.com/en-us/azure/azure-government/compare-azure-government-global-azure
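For illustration, a standalone sketch of the configurable URL construction that the patch above introduces as `AzureStorage._make_blob_service_url`:

```python
def make_blob_service_url(account_name, host=None, port=None):
    """Build the blob service account URL; host defaults to the Commercial cloud."""
    domain = 'windows.net' if host is None else host
    if port is None:
        return f"https://{account_name}.blob.core.{domain}/"
    return f"https://{account_name}.blob.core.{domain}:{port}/"

# AzureUSGovernment cloud, per the ask above
assert (make_blob_service_url('medusa-unit-test', host='usgovcloudapi.net')
        == 'https://medusa-unit-test.blob.core.usgovcloudapi.net/')
```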

|
0.0
|
dca04bc05ead2998241301be0084680b12e0502b
|
[
"tests/storage/azure_storage_test.py::AzureStorageTest::test_make_connection_url",
"tests/storage/azure_storage_test.py::AzureStorageTest::test_make_connection_url_with_custom_host",
"tests/storage/azure_storage_test.py::AzureStorageTest::test_make_connection_url_with_custom_host_port"
] |
[
"tests/storage/abstract_storage_test.py::AbstractStorageTest::test_convert_human_friendly_size_to_bytes",
"tests/storage/google_storage_test.py::GoogleStorageTest::test_group_by_parent",
"tests/storage/google_storage_test.py::GoogleStorageTest::test_is_in_folder",
"tests/storage/google_storage_test.py::GoogleStorageTest::test_iterator_hierarchy",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_env_without_profile",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_everything",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_file",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_metadata",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region_and_s3_compatible_storage",
"tests/storage/s3_storage_test.py::S3StorageTest::test_legacy_provider_region_replacement",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url_without_secure",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url_without_secure"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-17 15:54:37+00:00
|
apache-2.0
| 5,875 |
|
thelastpickle__cassandra-medusa-701
|
diff --git a/medusa-example.ini b/medusa-example.ini
index da5ad97..9d7b4d3 100644
--- a/medusa-example.ini
+++ b/medusa-example.ini
@@ -112,6 +112,10 @@ use_sudo_for_restore = True
; Configures the use of SSL to connect to the object storage system.
;secure = True
+; Enables verification of certificates used in case secure is set to True.
+; Enabling this is not yet supported - we don't have a good way to configure paths to custom certificates.
+; ssl_verify = False
+
;aws_cli_path = <Location of the aws cli binary if not in PATH>
[monitoring]
diff --git a/medusa/config.py b/medusa/config.py
index d8ec0e7..60d1477 100644
--- a/medusa/config.py
+++ b/medusa/config.py
@@ -30,8 +30,8 @@ StorageConfig = collections.namedtuple(
'StorageConfig',
['bucket_name', 'key_file', 'prefix', 'fqdn', 'host_file_separator', 'storage_provider',
'base_path', 'max_backup_age', 'max_backup_count', 'api_profile', 'transfer_max_bandwidth',
- 'concurrent_transfers', 'multi_part_upload_threshold', 'host', 'region', 'port', 'secure', 'aws_cli_path',
- 'kms_id', 'backup_grace_period_in_days', 'use_sudo_for_restore', 'k8s_mode']
+ 'concurrent_transfers', 'multi_part_upload_threshold', 'host', 'region', 'port', 'secure', 'ssl_verify',
+ 'aws_cli_path', 'kms_id', 'backup_grace_period_in_days', 'use_sudo_for_restore', 'k8s_mode']
)
CassandraConfig = collections.namedtuple(
@@ -111,6 +111,7 @@ def _build_default_config():
'concurrent_transfers': '1',
'multi_part_upload_threshold': str(20 * 1024 * 1024),
'secure': 'True',
+ 'ssl_verify': 'False', # False until we work out how to specify custom certs
'aws_cli_path': 'aws',
'fqdn': socket.getfqdn(),
'region': 'default',
diff --git a/medusa/storage/s3_base_storage.py b/medusa/storage/s3_base_storage.py
index 8e5b51e..ad6648e 100644
--- a/medusa/storage/s3_base_storage.py
+++ b/medusa/storage/s3_base_storage.py
@@ -162,6 +162,7 @@ class S3BaseStorage(AbstractStorage):
def _make_connection_arguments(self, config) -> t.Dict[str, str]:
secure = config.secure or 'True'
+ ssl_verify = config.ssl_verify or 'False' # False until we work out how to specify custom certs
host = config.host
port = config.port
@@ -175,7 +176,7 @@ class S3BaseStorage(AbstractStorage):
s3_url = '{}://{}:{}'.format(protocol, host, port)
return {
'endpoint_url': s3_url,
- 'verify': protocol == 'https'
+ 'verify': ssl_verify.lower() == 'true'
}
def _make_transfer_config(self, config):
|
thelastpickle/cassandra-medusa
|
50e1dad860ce78301da46f6fbac2cd8eb982509d
|
diff --git a/tests/storage/s3_storage_test.py b/tests/storage/s3_storage_test.py
index 0ae0719..acbaa6d 100644
--- a/tests/storage/s3_storage_test.py
+++ b/tests/storage/s3_storage_test.py
@@ -217,6 +217,7 @@ class S3StorageTest(unittest.TestCase):
'transfer_max_bandwidth': None,
'bucket_name': 'whatever-bucket',
'secure': 'True',
+ 'ssl_verify': 'False',
'host': None,
'port': None,
'concurrent_transfers': '1'
@@ -240,6 +241,7 @@ class S3StorageTest(unittest.TestCase):
'transfer_max_bandwidth': None,
'bucket_name': 'whatever-bucket',
'secure': 'False',
+ 'ssl_verify': 'False',
'host': None,
'port': None,
'concurrent_transfers': '1'
@@ -264,6 +266,7 @@ class S3StorageTest(unittest.TestCase):
'transfer_max_bandwidth': None,
'bucket_name': 'whatever-bucket',
'secure': 'True',
+ 'ssl_verify': 'False',
'host': 's3.example.com',
'port': '443',
'concurrent_transfers': '1'
@@ -286,6 +289,7 @@ class S3StorageTest(unittest.TestCase):
'transfer_max_bandwidth': None,
'bucket_name': 'whatever-bucket',
'secure': 'False',
+ 'ssl_verify': 'False',
'host': 's3.example.com',
'port': '8080',
'concurrent_transfers': '1'
@@ -296,6 +300,46 @@ class S3StorageTest(unittest.TestCase):
s3_storage.connection_extra_args['endpoint_url']
)
+ def test_make_connection_arguments_without_ssl_verify(self):
+ with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()):
+ config = AttributeDict({
+ 'storage_provider': 's3_compatible',
+ 'region': 'default',
+ 'key_file': '/tmp/whatever',
+ 'api_profile': None,
+ 'kms_id': None,
+ 'transfer_max_bandwidth': None,
+ 'bucket_name': 'whatever-bucket',
+ 'secure': 'False',
+ 'ssl_verify': 'False',
+ 'host': 's3.example.com',
+ 'port': '8080',
+ 'concurrent_transfers': '1'
+ })
+ s3_storage = S3BaseStorage(config)
+ connection_args = s3_storage._make_connection_arguments(config)
+ self.assertEqual(False, connection_args['verify'])
+
+ def test_make_connection_arguments_with_ssl_verify(self):
+ with patch('botocore.httpsession.URLLib3Session', return_value=_make_instance_metadata_mock()):
+ config = AttributeDict({
+ 'storage_provider': 's3_compatible',
+ 'region': 'default',
+ 'key_file': '/tmp/whatever',
+ 'api_profile': None,
+ 'kms_id': None,
+ 'transfer_max_bandwidth': None,
+ 'bucket_name': 'whatever-bucket',
+ 'secure': 'False',
+ 'ssl_verify': 'True',
+ 'host': 's3.example.com',
+ 'port': '8080',
+ 'concurrent_transfers': '1'
+ })
+ s3_storage = S3BaseStorage(config)
+ connection_args = s3_storage._make_connection_arguments(config)
+ self.assertEqual(True, connection_args['verify'])
+
def test_assume_role_authentication(self):
with patch('botocore.httpsession.URLLib3Session', new=_make_assume_role_with_web_identity_mock()):
if os.environ.get('AWS_ACCESS_KEY_ID', None):
@@ -328,6 +372,7 @@ class S3StorageTest(unittest.TestCase):
'transfer_max_bandwidth': None,
'bucket_name': 'whatever-bucket',
'secure': 'True',
+ 'ssl_verify': 'False',
'host': None,
'port': None,
'concurrent_transfers': '1'
|
Allow enabling/disabling SSL certificate verification
[Project board link](https://github.com/orgs/k8ssandra/projects/8/views/1?pane=issue&itemId=49576848)
We currently force SSL certificate verification as soon as HTTPS is enabled (via the `storage.secure` config setting).
It turns out there's an issue with this: it's not clear how to point medusa/boto/etc. at a custom certificate; they always use the ones from the certifi Python package.
A proposed solution is to add a new setting, `ssl_verify`, to control whether we validate the certificates or not (see the sketch below).
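A sketch of the decoupling, mirroring `_make_connection_arguments` in the patch above; the string-typed config values follow medusa's existing convention:

```python
from types import SimpleNamespace

def make_connection_arguments(config):
    """Sketch of _make_connection_arguments with verification decoupled."""
    secure = config.secure or 'True'
    ssl_verify = config.ssl_verify or 'False'  # False until custom certs are supported
    protocol = 'https' if secure.lower() == 'true' else 'http'
    return {
        'endpoint_url': '{}://{}:{}'.format(protocol, config.host, config.port),
        # previously this was hard-wired to (protocol == 'https')
        'verify': ssl_verify.lower() == 'true',
    }

cfg = SimpleNamespace(secure='False', ssl_verify='True',
                      host='s3.example.com', port='8080')
assert make_connection_arguments(cfg)['verify'] is True
```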
|
0.0
|
50e1dad860ce78301da46f6fbac2cd8eb982509d
|
[
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_connection_arguments_with_ssl_verify"
] |
[
"tests/storage/s3_storage_test.py::S3StorageTest::test_assume_role_authentication",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_env_without_profile",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_everything",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_file",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_from_metadata",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region",
"tests/storage/s3_storage_test.py::S3StorageTest::test_credentials_with_default_region_and_s3_compatible_storage",
"tests/storage/s3_storage_test.py::S3StorageTest::test_legacy_provider_region_replacement",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_connection_arguments_without_ssl_verify",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_compatible_url_without_secure",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url",
"tests/storage/s3_storage_test.py::S3StorageTest::test_make_s3_url_without_secure"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-10 15:58:31+00:00
|
apache-2.0
| 5,876 |
|
theolind__pymysensors-122
|
diff --git a/mysensors/__init__.py b/mysensors/__init__.py
index 520165a..ef9dec3 100644
--- a/mysensors/__init__.py
+++ b/mysensors/__init__.py
@@ -31,6 +31,10 @@ def get_const(protocol_version):
version = protocol_version
if parse_ver('1.5') <= parse_ver(version) < parse_ver('2.0'):
path = 'mysensors.const_15'
+ elif parse_ver(version) >= parse_ver('2.2'):
+ path = 'mysensors.const_22'
+ elif parse_ver(version) >= parse_ver('2.1'):
+ path = 'mysensors.const_21'
elif parse_ver(version) >= parse_ver('2.0'):
path = 'mysensors.const_20'
else:
@@ -127,8 +131,8 @@ class Gateway(object):
type=self.const.MessageType.set, payload=value)
return None
- def _handle_heartbeat(self, msg):
- """Process a heartbeat message."""
+ def _handle_smartsleep(self, msg):
+ """Process a message before going back to smartsleep."""
if not self.is_sensor(msg.node_id):
return
while self.sensors[msg.node_id].queue:
@@ -411,8 +415,8 @@ class Gateway(object):
thread has sent all previously queued commands to the FIFO queue.
If the sensor attribute new_state returns True, the command will not be
put on the queue, but the internal sensor state will be updated. When a
- heartbeat response is received, the internal state will be pushed to
- the sensor, via _handle_heartbeat method.
+ smartsleep message is received, the internal state will be pushed to
+ the sensor, via _handle_smartsleep method.
"""
if not self.is_sensor(sensor_id, child_id):
return
diff --git a/mysensors/const_20.py b/mysensors/const_20.py
index 98ff4fd..a27b6cd 100644
--- a/mysensors/const_20.py
+++ b/mysensors/const_20.py
@@ -1,4 +1,4 @@
-"""MySensors constants for version 1.5 of MySensors."""
+"""MySensors constants for version 2.0 of MySensors."""
from enum import IntEnum
import voluptuous as vol
@@ -246,11 +246,6 @@ class Internal(IntEnum):
I_REGISTRATION_REQUEST = 26 # Register request to GW
I_REGISTRATION_RESPONSE = 27 # Register response from GW
I_DEBUG = 28 # Debug message
- I_SIGNAL_REPORT_REQUEST = 29 # Device signal strength request
- I_SIGNAL_REPORT_REVERSE = 30 # Internal
- I_SIGNAL_REPORT_RESPONSE = 31 # Device signal strength response (RSSI)
- I_PRE_SLEEP_NOTIFICATION = 32 # Message sent before node is going to sleep
- I_POST_SLEEP_NOTIFICATION = 33 # Message sent after node woke up
class Stream(IntEnum):
@@ -344,15 +339,6 @@ VALID_INTERNAL.update({
Internal.I_REGISTRATION_REQUEST: str,
Internal.I_REGISTRATION_RESPONSE: str,
Internal.I_DEBUG: str,
- Internal.I_SIGNAL_REPORT_REQUEST: str,
- Internal.I_SIGNAL_REPORT_REVERSE: vol.All(
- vol.Coerce(int), vol.Coerce(str)),
- Internal.I_SIGNAL_REPORT_RESPONSE: vol.All(
- vol.Coerce(int), vol.Coerce(str)),
- Internal.I_PRE_SLEEP_NOTIFICATION: vol.All(
- vol.Coerce(int), vol.Coerce(str)),
- Internal.I_POST_SLEEP_NOTIFICATION: vol.All(
- vol.Coerce(int), vol.Coerce(str)),
})
VALID_PAYLOADS = {
@@ -370,7 +356,7 @@ HANDLE_INTERNAL.update({
'node_id': 255, 'ack': 0, 'sub_type': Internal.I_DISCOVER,
'payload': ''}},
Internal.I_HEARTBEAT_RESPONSE: {
- 'fun': '_handle_heartbeat'},
+ 'fun': '_handle_smartsleep'},
Internal.I_DISCOVER_RESPONSE: {
'is_sensor': True},
})
diff --git a/mysensors/const_21.py b/mysensors/const_21.py
new file mode 100644
index 0000000..7e08924
--- /dev/null
+++ b/mysensors/const_21.py
@@ -0,0 +1,107 @@
+"""MySensors constants for version 2.1 of MySensors."""
+from enum import IntEnum
+
+# pylint: disable=unused-import
+from mysensors.const_20 import (HANDLE_INTERNAL, MAX_NODE_ID, # noqa: F401
+ VALID_INTERNAL, VALID_PRESENTATION,
+ VALID_SETREQ, VALID_STREAM, VALID_TYPES,
+ MessageType, Presentation, SetReq, Stream)
+
+
+class Internal(IntEnum):
+ """MySensors internal sub-types."""
+
+ # pylint: disable=too-few-public-methods
+ # Use this to report the battery level (in percent 0-100).
+ I_BATTERY_LEVEL = 0
+ # Sensors can request the current time from the Controller using this
+ # message. The time will be reported as the seconds since 1970
+ I_TIME = 1
+ # Sensors report their library version at startup using this message type
+ I_VERSION = 2
+ # Use this to request a unique node id from the controller.
+ I_ID_REQUEST = 3
+ # Id response back to sensor. Payload contains sensor id.
+ I_ID_RESPONSE = 4
+ # Start/stop inclusion mode of the Controller (1=start, 0=stop).
+ I_INCLUSION_MODE = 5
+ # Config request from node. Reply with (M)etric or (I)mperal back to sensor
+ I_CONFIG = 6
+ # When a sensor starts up, it broadcast a search request to all neighbor
+ # nodes. They reply with a I_FIND_PARENT_RESPONSE.
+ I_FIND_PARENT_REQUEST = 7
+ I_FIND_PARENT = 7 # alias from version 2.0
+ # Reply message type to I_FIND_PARENT request.
+ I_FIND_PARENT_RESPONSE = 8
+ # Sent by the gateway to the Controller to trace-log a message
+ I_LOG_MESSAGE = 9
+ # A message that can be used to transfer child sensors
+ # (from EEPROM routing table) of a repeating node.
+ I_CHILDREN = 10
+ # Optional sketch name that can be used to identify sensor in the
+ # Controller GUI
+ I_SKETCH_NAME = 11
+ # Optional sketch version that can be reported to keep track of the version
+ # of sensor in the Controller GUI.
+ I_SKETCH_VERSION = 12
+ # Used by OTA firmware updates. Request for node to reboot.
+ I_REBOOT = 13
+ # Send by gateway to controller when startup is complete
+ I_GATEWAY_READY = 14
+ # Provides signing related preferences (first byte is preference version).
+ I_SIGNING_PRESENTATION = 15
+ I_REQUEST_SIGNING = 15 # alias from version 1.5
+ # Request for a nonce.
+ I_NONCE_REQUEST = 16
+ I_GET_NONCE = 16 # alias from version 1.5
+ # Payload is nonce data.
+ I_NONCE_RESPONSE = 17
+ I_GET_NONCE_RESPONSE = 17 # alias from version 1.5
+ I_HEARTBEAT_REQUEST = 18
+ I_HEARTBEAT = 18 # alias from version 2.0
+ I_PRESENTATION = 19
+ I_DISCOVER_REQUEST = 20
+ I_DISCOVER = 20 # alias from version 2.0
+ I_DISCOVER_RESPONSE = 21
+ I_HEARTBEAT_RESPONSE = 22
+ # Node is locked (reason in string-payload).
+ I_LOCKED = 23
+ I_PING = 24 # Ping sent to node, payload incremental hop counter
+ # In return to ping, sent back to sender, payload incremental hop counter
+ I_PONG = 25
+ I_REGISTRATION_REQUEST = 26 # Register request to GW
+ I_REGISTRATION_RESPONSE = 27 # Register response from GW
+ I_DEBUG = 28 # Debug message
+
+
+VALID_MESSAGE_TYPES = {
+ MessageType.presentation: list(Presentation),
+ MessageType.set: list(SetReq),
+ MessageType.req: list(SetReq),
+ MessageType.internal: list(Internal),
+ MessageType.stream: list(Stream),
+}
+
+
+VALID_INTERNAL = dict(VALID_INTERNAL)
+VALID_INTERNAL.update({
+ Internal.I_FIND_PARENT_REQUEST: '',
+ Internal.I_HEARTBEAT_REQUEST: '',
+ Internal.I_DISCOVER_REQUEST: '',
+})
+
+VALID_PAYLOADS = {
+ MessageType.presentation: VALID_PRESENTATION,
+ MessageType.set: VALID_SETREQ,
+ MessageType.req: {member: '' for member in list(SetReq)},
+ MessageType.internal: VALID_INTERNAL,
+ MessageType.stream: VALID_STREAM,
+}
+
+HANDLE_INTERNAL = dict(HANDLE_INTERNAL)
+HANDLE_INTERNAL.update({
+ Internal.I_GATEWAY_READY: {
+ 'log': 'info', 'msg': {
+ 'node_id': 255, 'ack': 0, 'sub_type': Internal.I_DISCOVER_REQUEST,
+ 'payload': ''}},
+})
diff --git a/mysensors/const_22.py b/mysensors/const_22.py
new file mode 100644
index 0000000..6289960
--- /dev/null
+++ b/mysensors/const_22.py
@@ -0,0 +1,119 @@
+"""MySensors constants for version 2.2 of MySensors."""
+from enum import IntEnum
+
+import voluptuous as vol
+
+# pylint: disable=unused-import
+from mysensors.const_21 import (HANDLE_INTERNAL, MAX_NODE_ID, # noqa: F401
+ VALID_INTERNAL, VALID_PRESENTATION,
+ VALID_SETREQ, VALID_STREAM, VALID_TYPES,
+ MessageType, Presentation, SetReq, Stream)
+
+
+class Internal(IntEnum):
+ """MySensors internal sub-types."""
+
+ # pylint: disable=too-few-public-methods
+ # Use this to report the battery level (in percent 0-100).
+ I_BATTERY_LEVEL = 0
+ # Sensors can request the current time from the Controller using this
+ # message. The time will be reported as the seconds since 1970
+ I_TIME = 1
+ # Sensors report their library version at startup using this message type
+ I_VERSION = 2
+ # Use this to request a unique node id from the controller.
+ I_ID_REQUEST = 3
+ # Id response back to sensor. Payload contains sensor id.
+ I_ID_RESPONSE = 4
+ # Start/stop inclusion mode of the Controller (1=start, 0=stop).
+ I_INCLUSION_MODE = 5
+ # Config request from node. Reply with (M)etric or (I)mperal back to sensor
+ I_CONFIG = 6
+ # When a sensor starts up, it broadcast a search request to all neighbor
+ # nodes. They reply with a I_FIND_PARENT_RESPONSE.
+ I_FIND_PARENT_REQUEST = 7
+ I_FIND_PARENT = 7 # alias from version 2.0
+ # Reply message type to I_FIND_PARENT request.
+ I_FIND_PARENT_RESPONSE = 8
+ # Sent by the gateway to the Controller to trace-log a message
+ I_LOG_MESSAGE = 9
+ # A message that can be used to transfer child sensors
+ # (from EEPROM routing table) of a repeating node.
+ I_CHILDREN = 10
+ # Optional sketch name that can be used to identify sensor in the
+ # Controller GUI
+ I_SKETCH_NAME = 11
+ # Optional sketch version that can be reported to keep track of the version
+ # of sensor in the Controller GUI.
+ I_SKETCH_VERSION = 12
+ # Used by OTA firmware updates. Request for node to reboot.
+ I_REBOOT = 13
+ # Send by gateway to controller when startup is complete
+ I_GATEWAY_READY = 14
+ # Provides signing related preferences (first byte is preference version).
+ I_SIGNING_PRESENTATION = 15
+ I_REQUEST_SIGNING = 15 # alias from version 1.5
+ # Request for a nonce.
+ I_NONCE_REQUEST = 16
+ I_GET_NONCE = 16 # alias from version 1.5
+ # Payload is nonce data.
+ I_NONCE_RESPONSE = 17
+ I_GET_NONCE_RESPONSE = 17 # alias from version 1.5
+ I_HEARTBEAT_REQUEST = 18
+ I_HEARTBEAT = 18 # alias from version 2.0
+ I_PRESENTATION = 19
+ I_DISCOVER_REQUEST = 20
+ I_DISCOVER = 20 # alias from version 2.0
+ I_DISCOVER_RESPONSE = 21
+ I_HEARTBEAT_RESPONSE = 22
+ # Node is locked (reason in string-payload).
+ I_LOCKED = 23
+ I_PING = 24 # Ping sent to node, payload incremental hop counter
+ # In return to ping, sent back to sender, payload incremental hop counter
+ I_PONG = 25
+ I_REGISTRATION_REQUEST = 26 # Register request to GW
+ I_REGISTRATION_RESPONSE = 27 # Register response from GW
+ I_DEBUG = 28 # Debug message
+ I_SIGNAL_REPORT_REQUEST = 29 # Device signal strength request
+ I_SIGNAL_REPORT_REVERSE = 30 # Internal
+ I_SIGNAL_REPORT_RESPONSE = 31 # Device signal strength response (RSSI)
+ I_PRE_SLEEP_NOTIFICATION = 32 # Message sent before node is going to sleep
+ I_POST_SLEEP_NOTIFICATION = 33 # Message sent after node woke up
+
+
+VALID_MESSAGE_TYPES = {
+ MessageType.presentation: list(Presentation),
+ MessageType.set: list(SetReq),
+ MessageType.req: list(SetReq),
+ MessageType.internal: list(Internal),
+ MessageType.stream: list(Stream),
+}
+
+
+VALID_INTERNAL = dict(VALID_INTERNAL)
+VALID_INTERNAL.update({
+ Internal.I_SIGNAL_REPORT_REQUEST: str,
+ Internal.I_SIGNAL_REPORT_REVERSE: vol.All(
+ vol.Coerce(int), vol.Coerce(str)),
+ Internal.I_SIGNAL_REPORT_RESPONSE: vol.All(
+ vol.Coerce(int), vol.Coerce(str)),
+ Internal.I_PRE_SLEEP_NOTIFICATION: vol.All(
+ vol.Coerce(int), vol.Coerce(str)),
+ Internal.I_POST_SLEEP_NOTIFICATION: vol.All(
+ vol.Coerce(int), vol.Coerce(str)),
+})
+
+VALID_PAYLOADS = {
+ MessageType.presentation: VALID_PRESENTATION,
+ MessageType.set: VALID_SETREQ,
+ MessageType.req: {member: '' for member in list(SetReq)},
+ MessageType.internal: VALID_INTERNAL,
+ MessageType.stream: VALID_STREAM,
+}
+
+HANDLE_INTERNAL = dict(HANDLE_INTERNAL)
+HANDLE_INTERNAL.pop(Internal.I_HEARTBEAT_RESPONSE, None)
+HANDLE_INTERNAL.update({
+ Internal.I_PRE_SLEEP_NOTIFICATION: {
+ 'fun': '_handle_smartsleep'},
+})
|
theolind/pymysensors
|
b6deffc604865bba583bf82c089fdfc1d38da4e8
|
diff --git a/tests/test_message.py b/tests/test_message.py
index bdd7dda..aac8a1b 100644
--- a/tests/test_message.py
+++ b/tests/test_message.py
@@ -162,6 +162,19 @@ INTERNAL_FIXTURES_20.update({
'I_REGISTRATION_REQUEST': '2.0.0',
'I_REGISTRATION_RESPONSE': '1',
'I_DEBUG': 'test debug',
+})
+
+
+INTERNAL_FIXTURES_21 = dict(INTERNAL_FIXTURES_20)
+INTERNAL_FIXTURES_21.update({
+ 'I_FIND_PARENT_REQUEST': '',
+ 'I_HEARTBEAT_REQUEST': '',
+ 'I_DISCOVER_REQUEST': '',
+})
+
+
+INTERNAL_FIXTURES_22 = dict(INTERNAL_FIXTURES_21)
+INTERNAL_FIXTURES_22.update({
'I_SIGNAL_REPORT_REQUEST': 'test',
'I_SIGNAL_REPORT_REVERSE': '123',
'I_SIGNAL_REPORT_RESPONSE': '123',
@@ -274,7 +287,8 @@ def test_validate_internal():
"""Test Internal messages."""
versions = [
('1.4', INTERNAL_FIXTURES_14), ('1.5', INTERNAL_FIXTURES_15),
- ('2.0', INTERNAL_FIXTURES_20)]
+ ('2.0', INTERNAL_FIXTURES_20), ('2.1', INTERNAL_FIXTURES_21),
+ ('2.2', INTERNAL_FIXTURES_22)]
for protocol_version, fixture in versions:
gateway = get_gateway(protocol_version)
const = get_const(protocol_version)
@@ -287,7 +301,15 @@ def test_validate_internal():
return_value = None
sub_type = const.Internal[name]
msg = Message('1;255;3;0;{};{}\n'.format(sub_type, _payload))
- valid = msg.validate(protocol_version)
+ try:
+ valid = msg.validate(protocol_version)
+ except vol.MultipleInvalid:
+ print('fixture version: ', protocol_version)
+ print('gateway version: ', gateway.protocol_version)
+ print('name: ', name)
+ print('subtype: ', sub_type)
+ print('payload: ', _payload)
+ raise
assert valid == {
'node_id': 1, 'child_id': 255, 'type': 3, 'ack': 0,
'sub_type': sub_type, 'payload': _payload}
diff --git a/tests/test_mysensors.py b/tests/test_mysensors.py
index 56879bd..e9eb14b 100644
--- a/tests/test_mysensors.py
+++ b/tests/test_mysensors.py
@@ -668,8 +668,8 @@ class TestGateway20(TestGateway):
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;255;3;0;19;\n')
- def test_heartbeat(self):
- """Test heartbeat message."""
+ def test_smartsleep(self):
+ """Test smartsleep feature."""
sensor = self._add_sensor(1)
sensor.children[0] = ChildSensor(
0, self.gateway.const.Presentation.S_LIGHT_LEVEL)
@@ -708,8 +708,8 @@ class TestGateway20(TestGateway):
# nothing has changed
self.assertEqual(ret, None)
- def test_heartbeat_from_unknown(self):
- """Test heartbeat message from unknown node."""
+ def test_smartsleep_from_unknown(self):
+ """Test smartsleep message from unknown node."""
self.gateway.logic('1;255;3;0;22;\n')
ret = self.gateway.handle_queue()
self.assertEqual(ret, '1;255;3;0;19;\n')
@@ -774,6 +774,81 @@ class TestGateway20(TestGateway):
'10.0,10.0,10.0')
+class TestGateway21(TestGateway20):
+ """Use protocol_version 2.1."""
+
+ def setUp(self):
+ """Set up gateway."""
+ self.gateway = Gateway(protocol_version='2.1')
+
+
+class TestGateway22(TestGateway21):
+ """Use protocol_version 2.2."""
+
+ def setUp(self):
+ """Set up gateway."""
+ self.gateway = Gateway(protocol_version='2.2')
+
+ def test_smartsleep(self):
+ """Test smartsleep feature."""
+ sensor = self._add_sensor(1)
+ sensor.children[0] = ChildSensor(
+ 0, self.gateway.const.Presentation.S_LIGHT_LEVEL)
+ self.gateway.logic('1;0;1;0;23;43\n')
+ ret = self.gateway.handle_queue()
+ self.assertEqual(ret, None)
+ # pre sleep message
+ self.gateway.logic('1;255;3;0;32;500\n')
+ ret = self.gateway.handle_queue()
+ # nothing has changed
+ self.assertEqual(ret, None)
+ # change from controller side
+ self.gateway.set_child_value(
+ 1, 0, self.gateway.const.SetReq.V_LIGHT_LEVEL, '57')
+ ret = self.gateway.handle_queue()
+ # no pre sleep message
+ self.assertEqual(ret, None)
+ # pre sleep message comes in
+ self.gateway.logic('1;255;3;0;32;500\n')
+ ret = self.gateway.handle_queue()
+ # instance responds with new values
+ self.assertEqual(ret, '1;0;1;0;23;57\n')
+ # request from node
+ self.gateway.logic('1;0;2;0;23;\n')
+ ret = self.gateway.handle_queue()
+ # no pre sleep message
+ self.assertEqual(ret, None)
+ # pre sleep message
+ self.gateway.logic('1;255;3;0;32;500\n')
+ ret = self.gateway.handle_queue()
+ # instance responds to request with current value
+ self.assertEqual(ret, '1;0;1;0;23;57\n')
+ # pre sleep message
+ self.gateway.logic('1;255;3;0;32;500\n')
+ ret = self.gateway.handle_queue()
+ # nothing has changed
+ self.assertEqual(ret, None)
+
+ def test_smartsleep_from_unknown(self):
+ """Test smartsleep message from unknown node."""
+ self.gateway.logic('1;255;3;0;32;500\n')
+ ret = self.gateway.handle_queue()
+ self.assertEqual(ret, '1;255;3;0;19;\n')
+
+ def test_set_with_new_state(self):
+ """Test set message with populated new_state."""
+ sensor = self._add_sensor(1)
+ sensor.children[0] = ChildSensor(
+ 0, self.gateway.const.Presentation.S_LIGHT_LEVEL)
+ self.gateway.logic('1;0;1;0;23;43\n')
+ self.gateway.logic('1;255;3;0;32;500\n')
+ self.gateway.logic('1;0;1;0;23;57\n')
+ self.assertEqual(
+ sensor.children[0].values[self.gateway.const.SetReq.V_LIGHT_LEVEL],
+ sensor.new_state[0].values[
+ self.gateway.const.SetReq.V_LIGHT_LEVEL])
+
+
def test_gateway_bad_protocol():
"""Test initializing gateway with a bad protocol_version."""
gateway = Gateway(protocol_version=None)
|
Add support for I_PRE_SLEEP_NOTIFICATION in 2.2.0
Version 2.2.0 changed the behavior of smartsleep. Instead of sending a heartbeat before going back to sleep, the node now sends an `I_PRE_SLEEP_NOTIFICATION` internal message. The node also sends an `I_POST_SLEEP_NOTIFICATION` after waking up from sleep. See:
https://github.com/mysensors/MySensors/pull/722
**Breaking change**
The change outlined above broke the smartsleep feature in pymysensors for users running version 2.2.0 of MySensors.
**Suggested fix**
Use different internal message types for the smartsleep handler depending on the MySensors version. Make a new const module and update these lines (a sketch of the fix follows the link below):
https://github.com/theolind/pymysensors/blob/dev/mysensors/const_20.py#L372-L373
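A sketch of that fix as it lands in `mysensors/const_22.py` in the patch above: copy the 2.1 handler table, drop the heartbeat-response entry, and route the smartsleep handler to the new pre-sleep message.

```python
# assumes the new const modules from the patch above are importable
from mysensors.const_21 import HANDLE_INTERNAL as HANDLE_INTERNAL_21
from mysensors.const_22 import Internal

HANDLE_INTERNAL = dict(HANDLE_INTERNAL_21)                # inherit the 2.1 handlers
HANDLE_INTERNAL.pop(Internal.I_HEARTBEAT_RESPONSE, None)  # heartbeat no longer means smartsleep
HANDLE_INTERNAL.update({
    Internal.I_PRE_SLEEP_NOTIFICATION: {'fun': '_handle_smartsleep'},
})
```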
|
0.0
|
b6deffc604865bba583bf82c089fdfc1d38da4e8
|
[
"tests/test_message.py::test_validate_internal",
"tests/test_mysensors.py::TestGateway22::test_set_with_new_state",
"tests/test_mysensors.py::TestGateway22::test_smartsleep",
"tests/test_mysensors.py::TestGateway22::test_smartsleep_from_unknown"
] |
[
"tests/test_message.py::TestMessage::test_decode",
"tests/test_message.py::TestMessage::test_decode_bad_message",
"tests/test_message.py::TestMessage::test_encode",
"tests/test_message.py::TestMessage::test_encode_bad_message",
"tests/test_message.py::test_validate_pres",
"tests/test_message.py::test_validate_bad_pres",
"tests/test_message.py::test_validate_set",
"tests/test_mysensors.py::TestGateway::test_bad_battery_level",
"tests/test_mysensors.py::TestGateway::test_bad_file_name",
"tests/test_mysensors.py::TestGateway::test_battery_level",
"tests/test_mysensors.py::TestGateway::test_callback",
"tests/test_mysensors.py::TestGateway::test_callback_exception",
"tests/test_mysensors.py::TestGateway::test_child_validate",
"tests/test_mysensors.py::TestGateway::test_id_request_with_node_zero",
"tests/test_mysensors.py::TestGateway::test_internal_config",
"tests/test_mysensors.py::TestGateway::test_internal_gateway_ready",
"tests/test_mysensors.py::TestGateway::test_internal_id_request",
"tests/test_mysensors.py::TestGateway::test_internal_log_message",
"tests/test_mysensors.py::TestGateway::test_internal_sketch_name",
"tests/test_mysensors.py::TestGateway::test_internal_sketch_version",
"tests/test_mysensors.py::TestGateway::test_internal_time",
"tests/test_mysensors.py::TestGateway::test_json_empty_file_good_bak",
"tests/test_mysensors.py::TestGateway::test_json_empty_files",
"tests/test_mysensors.py::TestGateway::test_json_no_files",
"tests/test_mysensors.py::TestGateway::test_json_persistence",
"tests/test_mysensors.py::TestGateway::test_json_upgrade",
"tests/test_mysensors.py::TestGateway::test_logic_bad_message",
"tests/test_mysensors.py::TestGateway::test_non_presented_sensor",
"tests/test_mysensors.py::TestGateway::test_persistence_at_init",
"tests/test_mysensors.py::TestGateway::test_pickle_empty_files",
"tests/test_mysensors.py::TestGateway::test_pickle_persistence",
"tests/test_mysensors.py::TestGateway::test_pickle_upgrade",
"tests/test_mysensors.py::TestGateway::test_present_humidity_sensor",
"tests/test_mysensors.py::TestGateway::test_present_light_level_sensor",
"tests/test_mysensors.py::TestGateway::test_present_same_child",
"tests/test_mysensors.py::TestGateway::test_present_to_non_sensor",
"tests/test_mysensors.py::TestGateway::test_presentation_arduino_node",
"tests/test_mysensors.py::TestGateway::test_req",
"tests/test_mysensors.py::TestGateway::test_req_notasensor",
"tests/test_mysensors.py::TestGateway::test_req_novalue",
"tests/test_mysensors.py::TestGateway::test_req_zerovalue",
"tests/test_mysensors.py::TestGateway::test_set_and_reboot",
"tests/test_mysensors.py::TestGateway::test_set_bad_battery_attribute",
"tests/test_mysensors.py::TestGateway::test_set_child_no_children",
"tests/test_mysensors.py::TestGateway::test_set_child_value",
"tests/test_mysensors.py::TestGateway::test_set_child_value_bad_ack",
"tests/test_mysensors.py::TestGateway::test_set_child_value_bad_type",
"tests/test_mysensors.py::TestGateway::test_set_child_value_no_sensor",
"tests/test_mysensors.py::TestGateway::test_set_child_value_value_type",
"tests/test_mysensors.py::TestGateway::test_set_forecast",
"tests/test_mysensors.py::TestGateway::test_set_humidity_level",
"tests/test_mysensors.py::TestGateway::test_set_light_level",
"tests/test_mysensors.py::TestGateway15::test_bad_battery_level",
"tests/test_mysensors.py::TestGateway15::test_bad_file_name",
"tests/test_mysensors.py::TestGateway15::test_battery_level",
"tests/test_mysensors.py::TestGateway15::test_callback",
"tests/test_mysensors.py::TestGateway15::test_callback_exception",
"tests/test_mysensors.py::TestGateway15::test_child_validate",
"tests/test_mysensors.py::TestGateway15::test_id_request_with_node_zero",
"tests/test_mysensors.py::TestGateway15::test_internal_config",
"tests/test_mysensors.py::TestGateway15::test_internal_gateway_ready",
"tests/test_mysensors.py::TestGateway15::test_internal_id_request",
"tests/test_mysensors.py::TestGateway15::test_internal_log_message",
"tests/test_mysensors.py::TestGateway15::test_internal_sketch_name",
"tests/test_mysensors.py::TestGateway15::test_internal_sketch_version",
"tests/test_mysensors.py::TestGateway15::test_internal_time",
"tests/test_mysensors.py::TestGateway15::test_json_empty_file_good_bak",
"tests/test_mysensors.py::TestGateway15::test_json_empty_files",
"tests/test_mysensors.py::TestGateway15::test_json_no_files",
"tests/test_mysensors.py::TestGateway15::test_json_persistence",
"tests/test_mysensors.py::TestGateway15::test_json_upgrade",
"tests/test_mysensors.py::TestGateway15::test_logic_bad_message",
"tests/test_mysensors.py::TestGateway15::test_non_presented_sensor",
"tests/test_mysensors.py::TestGateway15::test_persistence_at_init",
"tests/test_mysensors.py::TestGateway15::test_pickle_empty_files",
"tests/test_mysensors.py::TestGateway15::test_pickle_persistence",
"tests/test_mysensors.py::TestGateway15::test_pickle_upgrade",
"tests/test_mysensors.py::TestGateway15::test_present_humidity_sensor",
"tests/test_mysensors.py::TestGateway15::test_present_light_level_sensor",
"tests/test_mysensors.py::TestGateway15::test_present_same_child",
"tests/test_mysensors.py::TestGateway15::test_present_to_non_sensor",
"tests/test_mysensors.py::TestGateway15::test_presentation_arduino_node",
"tests/test_mysensors.py::TestGateway15::test_req",
"tests/test_mysensors.py::TestGateway15::test_req_notasensor",
"tests/test_mysensors.py::TestGateway15::test_req_novalue",
"tests/test_mysensors.py::TestGateway15::test_req_zerovalue",
"tests/test_mysensors.py::TestGateway15::test_set_and_reboot",
"tests/test_mysensors.py::TestGateway15::test_set_bad_battery_attribute",
"tests/test_mysensors.py::TestGateway15::test_set_child_no_children",
"tests/test_mysensors.py::TestGateway15::test_set_child_value",
"tests/test_mysensors.py::TestGateway15::test_set_child_value_bad_ack",
"tests/test_mysensors.py::TestGateway15::test_set_child_value_bad_type",
"tests/test_mysensors.py::TestGateway15::test_set_child_value_no_sensor",
"tests/test_mysensors.py::TestGateway15::test_set_child_value_value_type",
"tests/test_mysensors.py::TestGateway15::test_set_forecast",
"tests/test_mysensors.py::TestGateway15::test_set_humidity_level",
"tests/test_mysensors.py::TestGateway15::test_set_light_level",
"tests/test_mysensors.py::TestGateway15::test_set_rgb",
"tests/test_mysensors.py::TestGateway15::test_set_rgbw",
"tests/test_mysensors.py::TestGateway20::test_bad_battery_level",
"tests/test_mysensors.py::TestGateway20::test_bad_file_name",
"tests/test_mysensors.py::TestGateway20::test_battery_level",
"tests/test_mysensors.py::TestGateway20::test_callback",
"tests/test_mysensors.py::TestGateway20::test_callback_exception",
"tests/test_mysensors.py::TestGateway20::test_child_validate",
"tests/test_mysensors.py::TestGateway20::test_discover_response_known",
"tests/test_mysensors.py::TestGateway20::test_discover_response_unknown",
"tests/test_mysensors.py::TestGateway20::test_id_request_with_node_zero",
"tests/test_mysensors.py::TestGateway20::test_internal_config",
"tests/test_mysensors.py::TestGateway20::test_internal_gateway_ready",
"tests/test_mysensors.py::TestGateway20::test_internal_id_request",
"tests/test_mysensors.py::TestGateway20::test_internal_log_message",
"tests/test_mysensors.py::TestGateway20::test_internal_sketch_name",
"tests/test_mysensors.py::TestGateway20::test_internal_sketch_version",
"tests/test_mysensors.py::TestGateway20::test_internal_time",
"tests/test_mysensors.py::TestGateway20::test_json_empty_file_good_bak",
"tests/test_mysensors.py::TestGateway20::test_json_empty_files",
"tests/test_mysensors.py::TestGateway20::test_json_no_files",
"tests/test_mysensors.py::TestGateway20::test_json_persistence",
"tests/test_mysensors.py::TestGateway20::test_json_upgrade",
"tests/test_mysensors.py::TestGateway20::test_logic_bad_message",
"tests/test_mysensors.py::TestGateway20::test_non_presented_child",
"tests/test_mysensors.py::TestGateway20::test_non_presented_sensor",
"tests/test_mysensors.py::TestGateway20::test_persistence_at_init",
"tests/test_mysensors.py::TestGateway20::test_pickle_empty_files",
"tests/test_mysensors.py::TestGateway20::test_pickle_persistence",
"tests/test_mysensors.py::TestGateway20::test_pickle_upgrade",
"tests/test_mysensors.py::TestGateway20::test_present_humidity_sensor",
"tests/test_mysensors.py::TestGateway20::test_present_light_level_sensor",
"tests/test_mysensors.py::TestGateway20::test_present_same_child",
"tests/test_mysensors.py::TestGateway20::test_present_to_non_sensor",
"tests/test_mysensors.py::TestGateway20::test_presentation_arduino_node",
"tests/test_mysensors.py::TestGateway20::test_req",
"tests/test_mysensors.py::TestGateway20::test_req_notasensor",
"tests/test_mysensors.py::TestGateway20::test_req_novalue",
"tests/test_mysensors.py::TestGateway20::test_req_zerovalue",
"tests/test_mysensors.py::TestGateway20::test_set_and_reboot",
"tests/test_mysensors.py::TestGateway20::test_set_bad_battery_attribute",
"tests/test_mysensors.py::TestGateway20::test_set_child_no_children",
"tests/test_mysensors.py::TestGateway20::test_set_child_value",
"tests/test_mysensors.py::TestGateway20::test_set_child_value_bad_ack",
"tests/test_mysensors.py::TestGateway20::test_set_child_value_bad_type",
"tests/test_mysensors.py::TestGateway20::test_set_child_value_no_sensor",
"tests/test_mysensors.py::TestGateway20::test_set_child_value_value_type",
"tests/test_mysensors.py::TestGateway20::test_set_forecast",
"tests/test_mysensors.py::TestGateway20::test_set_humidity_level",
"tests/test_mysensors.py::TestGateway20::test_set_light_level",
"tests/test_mysensors.py::TestGateway20::test_set_position",
"tests/test_mysensors.py::TestGateway20::test_set_with_new_state",
"tests/test_mysensors.py::TestGateway20::test_smartsleep",
"tests/test_mysensors.py::TestGateway20::test_smartsleep_from_unknown",
"tests/test_mysensors.py::TestGateway21::test_bad_battery_level",
"tests/test_mysensors.py::TestGateway21::test_bad_file_name",
"tests/test_mysensors.py::TestGateway21::test_battery_level",
"tests/test_mysensors.py::TestGateway21::test_callback",
"tests/test_mysensors.py::TestGateway21::test_callback_exception",
"tests/test_mysensors.py::TestGateway21::test_child_validate",
"tests/test_mysensors.py::TestGateway21::test_discover_response_known",
"tests/test_mysensors.py::TestGateway21::test_discover_response_unknown",
"tests/test_mysensors.py::TestGateway21::test_id_request_with_node_zero",
"tests/test_mysensors.py::TestGateway21::test_internal_config",
"tests/test_mysensors.py::TestGateway21::test_internal_gateway_ready",
"tests/test_mysensors.py::TestGateway21::test_internal_id_request",
"tests/test_mysensors.py::TestGateway21::test_internal_log_message",
"tests/test_mysensors.py::TestGateway21::test_internal_sketch_name",
"tests/test_mysensors.py::TestGateway21::test_internal_sketch_version",
"tests/test_mysensors.py::TestGateway21::test_internal_time",
"tests/test_mysensors.py::TestGateway21::test_json_empty_file_good_bak",
"tests/test_mysensors.py::TestGateway21::test_json_empty_files",
"tests/test_mysensors.py::TestGateway21::test_json_no_files",
"tests/test_mysensors.py::TestGateway21::test_json_persistence",
"tests/test_mysensors.py::TestGateway21::test_json_upgrade",
"tests/test_mysensors.py::TestGateway21::test_logic_bad_message",
"tests/test_mysensors.py::TestGateway21::test_non_presented_child",
"tests/test_mysensors.py::TestGateway21::test_non_presented_sensor",
"tests/test_mysensors.py::TestGateway21::test_persistence_at_init",
"tests/test_mysensors.py::TestGateway21::test_pickle_empty_files",
"tests/test_mysensors.py::TestGateway21::test_pickle_persistence",
"tests/test_mysensors.py::TestGateway21::test_pickle_upgrade",
"tests/test_mysensors.py::TestGateway21::test_present_humidity_sensor",
"tests/test_mysensors.py::TestGateway21::test_present_light_level_sensor",
"tests/test_mysensors.py::TestGateway21::test_present_same_child",
"tests/test_mysensors.py::TestGateway21::test_present_to_non_sensor",
"tests/test_mysensors.py::TestGateway21::test_presentation_arduino_node",
"tests/test_mysensors.py::TestGateway21::test_req",
"tests/test_mysensors.py::TestGateway21::test_req_notasensor",
"tests/test_mysensors.py::TestGateway21::test_req_novalue",
"tests/test_mysensors.py::TestGateway21::test_req_zerovalue",
"tests/test_mysensors.py::TestGateway21::test_set_and_reboot",
"tests/test_mysensors.py::TestGateway21::test_set_bad_battery_attribute",
"tests/test_mysensors.py::TestGateway21::test_set_child_no_children",
"tests/test_mysensors.py::TestGateway21::test_set_child_value",
"tests/test_mysensors.py::TestGateway21::test_set_child_value_bad_ack",
"tests/test_mysensors.py::TestGateway21::test_set_child_value_bad_type",
"tests/test_mysensors.py::TestGateway21::test_set_child_value_no_sensor",
"tests/test_mysensors.py::TestGateway21::test_set_child_value_value_type",
"tests/test_mysensors.py::TestGateway21::test_set_forecast",
"tests/test_mysensors.py::TestGateway21::test_set_humidity_level",
"tests/test_mysensors.py::TestGateway21::test_set_light_level",
"tests/test_mysensors.py::TestGateway21::test_set_position",
"tests/test_mysensors.py::TestGateway21::test_set_with_new_state",
"tests/test_mysensors.py::TestGateway21::test_smartsleep",
"tests/test_mysensors.py::TestGateway21::test_smartsleep_from_unknown",
"tests/test_mysensors.py::TestGateway22::test_bad_battery_level",
"tests/test_mysensors.py::TestGateway22::test_bad_file_name",
"tests/test_mysensors.py::TestGateway22::test_battery_level",
"tests/test_mysensors.py::TestGateway22::test_callback",
"tests/test_mysensors.py::TestGateway22::test_callback_exception",
"tests/test_mysensors.py::TestGateway22::test_child_validate",
"tests/test_mysensors.py::TestGateway22::test_discover_response_known",
"tests/test_mysensors.py::TestGateway22::test_discover_response_unknown",
"tests/test_mysensors.py::TestGateway22::test_id_request_with_node_zero",
"tests/test_mysensors.py::TestGateway22::test_internal_config",
"tests/test_mysensors.py::TestGateway22::test_internal_gateway_ready",
"tests/test_mysensors.py::TestGateway22::test_internal_id_request",
"tests/test_mysensors.py::TestGateway22::test_internal_log_message",
"tests/test_mysensors.py::TestGateway22::test_internal_sketch_name",
"tests/test_mysensors.py::TestGateway22::test_internal_sketch_version",
"tests/test_mysensors.py::TestGateway22::test_internal_time",
"tests/test_mysensors.py::TestGateway22::test_json_empty_file_good_bak",
"tests/test_mysensors.py::TestGateway22::test_json_empty_files",
"tests/test_mysensors.py::TestGateway22::test_json_no_files",
"tests/test_mysensors.py::TestGateway22::test_json_persistence",
"tests/test_mysensors.py::TestGateway22::test_json_upgrade",
"tests/test_mysensors.py::TestGateway22::test_logic_bad_message",
"tests/test_mysensors.py::TestGateway22::test_non_presented_child",
"tests/test_mysensors.py::TestGateway22::test_non_presented_sensor",
"tests/test_mysensors.py::TestGateway22::test_persistence_at_init",
"tests/test_mysensors.py::TestGateway22::test_pickle_empty_files",
"tests/test_mysensors.py::TestGateway22::test_pickle_persistence",
"tests/test_mysensors.py::TestGateway22::test_pickle_upgrade",
"tests/test_mysensors.py::TestGateway22::test_present_humidity_sensor",
"tests/test_mysensors.py::TestGateway22::test_present_light_level_sensor",
"tests/test_mysensors.py::TestGateway22::test_present_same_child",
"tests/test_mysensors.py::TestGateway22::test_present_to_non_sensor",
"tests/test_mysensors.py::TestGateway22::test_presentation_arduino_node",
"tests/test_mysensors.py::TestGateway22::test_req",
"tests/test_mysensors.py::TestGateway22::test_req_notasensor",
"tests/test_mysensors.py::TestGateway22::test_req_novalue",
"tests/test_mysensors.py::TestGateway22::test_req_zerovalue",
"tests/test_mysensors.py::TestGateway22::test_set_and_reboot",
"tests/test_mysensors.py::TestGateway22::test_set_bad_battery_attribute",
"tests/test_mysensors.py::TestGateway22::test_set_child_no_children",
"tests/test_mysensors.py::TestGateway22::test_set_child_value",
"tests/test_mysensors.py::TestGateway22::test_set_child_value_bad_ack",
"tests/test_mysensors.py::TestGateway22::test_set_child_value_bad_type",
"tests/test_mysensors.py::TestGateway22::test_set_child_value_no_sensor",
"tests/test_mysensors.py::TestGateway22::test_set_child_value_value_type",
"tests/test_mysensors.py::TestGateway22::test_set_forecast",
"tests/test_mysensors.py::TestGateway22::test_set_humidity_level",
"tests/test_mysensors.py::TestGateway22::test_set_light_level",
"tests/test_mysensors.py::TestGateway22::test_set_position",
"tests/test_mysensors.py::test_gateway_bad_protocol",
"tests/test_mysensors.py::test_gateway_low_protocol"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-02-26 19:06:22+00:00
|
mit
| 5,877 |
|
theopenconversationkit__tock-py-9
|
diff --git a/tock/models.py b/tock/models.py
index 67301f0..e3f7eda 100644
--- a/tock/models.py
+++ b/tock/models.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
import abc
+from dataclasses import dataclass
from datetime import datetime
from enum import Enum
-from re import split
from typing import List, Union, Optional
from tock.intent import IntentName
@@ -13,103 +13,84 @@ class PlayerType(Enum):
BOT = "bot"
+@dataclass
class Entity:
-
- def __init__(self, type: str, role: str, evaluated: bool, new: bool, content: str = None, value: str = None):
- self.type = type
- self.role = role
- self.content = content
- self.value = value
- self.evaluated = evaluated
- self.sub_entities = []
- self.new = new
+ type: str
+ role: str
+ evaluated: bool
+ sub_entities = []
+ new: bool
+ content: str = None
+ value: Optional[str] = None
+@dataclass
class Message:
-
- def __init__(self, type: str, text: str):
- self.type = type
- self.text = text
+ type: str
+ text: str
+@dataclass
class ConnectorType:
-
- def __init__(self, id: str, user_interface_type: str):
- self.id = id
- self.user_interface_type = user_interface_type
+ id: str
+ user_interface_type: str
+@dataclass
class UserId:
-
- def __init__(self, id: str, type: PlayerType, client_id: str = None):
- self.id = id
- self.type = type
- self.client_id = client_id
+ id: str
+ type: PlayerType
+ client_id: Optional[str] = None
+@dataclass
class User:
-
- def __init__(self, timezone: str, locale: str, test: bool):
- self.timezone = timezone
- self.locale = locale
- self.test = test
+ timezone: str
+ locale: str
+ test: bool
+@dataclass
class RequestContext:
-
- def __init__(self,
- namespace: str,
- language: str,
- connector_type: ConnectorType,
- user_interface: str,
- application_id: str,
- user_id: UserId,
- bot_id: UserId,
- user: User):
- self.namespace = namespace
- self.language = language
- self.connector_type = connector_type
- self.user_interface = user_interface
- self.application_id = application_id
- self.user_id = user_id
- self.bot_id = bot_id
- self.user = user
+ namespace: str
+ language: str
+ connector_type: ConnectorType
+ user_interface: str
+ application_id: str
+ user_id: UserId
+ bot_id: UserId
+ user: User
+@dataclass
class I18nText:
-
- def __init__(self,
- text: str,
- args: [],
- to_be_translated: bool,
- length: int,
- key: Optional[str] = None
- ):
- self.text = text
- self.args = args
- self.to_be_translated = to_be_translated
- self.length = length
- self.key = key
+ text: str
+ args: []
+ to_be_translated: bool
+ length: int
+ key: Optional[str] = None
+@dataclass
class Suggestion:
-
- def __init__(self, title: I18nText):
- self.title = title
+ title: I18nText
+@dataclass
class BotMessage(abc.ABC):
-
- def __init__(self, delay: int = 0):
- self.delay = delay
+ delay: int
+@dataclass
class Sentence(BotMessage):
+ text: I18nText
+ suggestions: List[Suggestion]
+ delay: int
def __init__(self,
text: I18nText,
suggestions: List[Suggestion],
- delay: int):
+ delay: int = 0):
self.text = text
self.suggestions = suggestions
super().__init__(delay)
@@ -168,28 +149,32 @@ class AttachmentType(Enum):
FILE = "file"
+@dataclass
class Attachment:
-
- def __init__(self, url: str, type: Optional[AttachmentType]):
- self.url = url
- self.type = type
+ url: str
+ type: Optional[AttachmentType]
+@dataclass
class Action:
-
- def __init__(self, title: I18nText, url: Optional[str]):
- self.title = title
- self.url = url
+ title: I18nText
+ url: Optional[str]
+@dataclass
class Card(BotMessage):
+ title: Optional[I18nText]
+ sub_title: Optional[I18nText]
+ attachment: Optional[Attachment]
+ actions: List[Action]
+ delay: int
def __init__(self,
title: Optional[I18nText],
sub_title: Optional[I18nText],
attachment: Optional[Attachment],
actions: List[Action],
- delay: int):
+ delay: int = 0):
self.title = title
self.sub_title = sub_title
self.attachment = attachment
@@ -258,9 +243,12 @@ class Card(BotMessage):
)
+@dataclass
class Carousel(BotMessage):
+ cards: List[Card]
+ delay: int
- def __init__(self, cards: List[Card], delay: int):
+ def __init__(self, cards: List[Card], delay: int = 0):
self.cards = cards
super().__init__(delay)
@@ -281,38 +269,32 @@ class Carousel(BotMessage):
)
+@dataclass
class ResponseContext:
-
- def __init__(self, request_id: str, date: datetime):
- self.request_id = request_id
- self.date = date
+ request_id: str
+ date: datetime
+@dataclass
class BotRequest:
-
- def __init__(self, intent: IntentName, entities: List[Entity], message: Message, story_id: str,
- request_context: RequestContext = None):
- self.intent = intent
- self.entities = entities
- self.message = message
- self.story_id = story_id
- self.request_context = request_context
+ intent: IntentName
+ entities: List[Entity]
+ message: Message
+ story_id: str
+ request_context: RequestContext = None
+@dataclass
class BotResponse:
-
- def __init__(self, messages: List[BotMessage], story_id: str, step: str, context: ResponseContext,
- entities: List[Entity]):
- self.messages = messages
- self.story_id = story_id
- self.step = step
- self.entities = entities
- self.context = context
+ messages: List[BotMessage]
+ story_id: str
+ step: str
+ context: ResponseContext
+ entities: List[Entity]
+@dataclass
class TockMessage:
-
- def __init__(self, request_id: str, bot_request: BotRequest = None, bot_response: BotResponse = None):
- self.bot_request = bot_request
- self.bot_response = bot_response
- self.request_id = request_id
+ request_id: str
+ bot_request: BotRequest = None
+ bot_response: BotResponse = None
|
theopenconversationkit/tock-py
|
896d1baf4cd66a414ad72ee081f3d1288dc01bf5
|
diff --git a/tock/tests/test_schemas.py b/tock/tests/test_schemas.py
index f04c159..5006254 100644
--- a/tock/tests/test_schemas.py
+++ b/tock/tests/test_schemas.py
@@ -209,7 +209,7 @@ class TestEntitySchema(TestCase):
expected = given_entity()
schema = EntitySchema()
result = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestMessageSchema(TestCase):
@@ -217,7 +217,7 @@ class TestMessageSchema(TestCase):
expected = given_message()
schema = MessageSchema()
result: Message = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestConnectorTypeSchema(TestCase):
@@ -225,7 +225,7 @@ class TestConnectorTypeSchema(TestCase):
expected = given_connector_type()
schema = ConnectorTypeSchema()
result: ConnectorType = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestUserIdSchema(TestCase):
@@ -233,7 +233,7 @@ class TestUserIdSchema(TestCase):
expected = given_user_id()
schema = UserIdSchema()
result: UserId = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestUserSchema(TestCase):
@@ -241,7 +241,7 @@ class TestUserSchema(TestCase):
expected = given_user()
schema = UserSchema()
result: User = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestRequestContextSchema(TestCase):
@@ -249,7 +249,7 @@ class TestRequestContextSchema(TestCase):
expected = given_request_context()
schema = RequestContextSchema()
result: RequestContext = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestSuggestionSchema(TestCase):
@@ -257,7 +257,7 @@ class TestSuggestionSchema(TestCase):
expected = given_suggestion()
schema = SuggestionSchema()
result: Suggestion = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestI18nTextSchema(TestCase):
@@ -265,7 +265,7 @@ class TestI18nTextSchema(TestCase):
expected = given_i18n_text()
schema = I18nTextSchema()
result: I18nText = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestSentenceSchema(TestCase):
@@ -275,7 +275,7 @@ class TestSentenceSchema(TestCase):
dumps = schema.dumps(expected)
loads = json.loads(dumps)
result: Sentence = schema.load(loads)
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestAttachmentSchema(TestCase):
@@ -285,7 +285,7 @@ class TestAttachmentSchema(TestCase):
dumps = schema.dumps(expected)
loads = json.loads(dumps)
result: Attachment = schema.load(loads)
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestActionSchema(TestCase):
@@ -293,7 +293,7 @@ class TestActionSchema(TestCase):
expected = given_action()
schema = ActionSchema()
result: Action = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestCardSchema(TestCase):
@@ -303,7 +303,7 @@ class TestCardSchema(TestCase):
dumps = schema.dumps(expected)
loads = json.loads(dumps)
result: Card = schema.load(loads)
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestCarouselSchema(TestCase):
@@ -313,7 +313,7 @@ class TestCarouselSchema(TestCase):
dumps = schema.dumps(expected)
loads = json.loads(dumps)
result: Carousel = schema.load(loads)
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestResponseContextSchema(TestCase):
@@ -323,7 +323,7 @@ class TestResponseContextSchema(TestCase):
dumps = schema.dumps(expected)
loads = json.loads(dumps)
result: Sentence = schema.load(loads)
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestBotRequestSchema(TestCase):
@@ -333,7 +333,7 @@ class TestBotRequestSchema(TestCase):
dumps = schema.dumps(expected)
loads = json.loads(dumps)
result: BotRequest = schema.load(loads)
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestBotResponseSchema(TestCase):
@@ -343,7 +343,7 @@ class TestBotResponseSchema(TestCase):
dumps = schema.dumps(expected)
loads = json.loads(dumps)
result: BotResponse = schema.load(loads)
- compare(expected, result)
+ self.assertEqual(expected, result)
class TestTockMessageSchema(TestCase):
@@ -351,7 +351,7 @@ class TestTockMessageSchema(TestCase):
expected = given_tock_message()
schema = TockMessageSchema()
result: TockMessage = schema.load(json.loads(schema.dumps(expected)))
- compare(expected, result)
+ self.assertEqual(expected, result)
if __name__ == '__main__':
|
Use the `dataclass` decorator for building the model classes;
cf. https://docs.python.org/3/library/dataclasses.html
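
A minimal sketch of the refactoring (class and field names taken from the patch above; everything else is illustrative):

```python
from dataclasses import dataclass

# Hand-written boilerplate that the patch removes:
#
#     class Message:
#         def __init__(self, type: str, text: str):
#             self.type = type
#             self.text = text

# Equivalent dataclass: __init__, __repr__ and __eq__ are generated,
# which is also why the test patch can switch from compare() to assertEqual().
@dataclass
class Message:
    type: str
    text: str

m = Message(type="text", text="hello")
assert m == Message(type="text", text="hello")  # generated __eq__
```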
|
0.0
|
896d1baf4cd66a414ad72ee081f3d1288dc01bf5
|
[
"tock/tests/test_schemas.py::TestEntitySchema::test_json_serialization",
"tock/tests/test_schemas.py::TestMessageSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestConnectorTypeSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestUserIdSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestUserSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestRequestContextSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestSuggestionSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestI18nTextSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestSentenceSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestAttachmentSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestActionSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestCardSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestCarouselSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestResponseContextSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestBotRequestSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestBotResponseSchema::test_json_serialization",
"tock/tests/test_schemas.py::TestTockMessageSchema::test_json_serialization"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-15 20:55:54+00:00
|
mit
| 5,878 |
|
thesimj__envyaml-10
|
diff --git a/envyaml/envyaml.py b/envyaml/envyaml.py
index d02118a..e0ba807 100644
--- a/envyaml/envyaml.py
+++ b/envyaml/envyaml.py
@@ -30,15 +30,21 @@ RE_COMMENTS = re.compile(r"(^#.*\n)", re.MULTILINE | re.UNICODE)
RE_DOT_ENV = re.compile(r"^((?!\d)[\w\- ]+=.*)$", re.MULTILINE | re.UNICODE)
RE_ENV = [
- (re.compile(r"(?<=\$\{)(.*?)(?=\})", re.MULTILINE | re.UNICODE), ["${{{match}}}"]),
+ (
+ re.compile(r"(?<=\$\{)(.*?)(?=\})", re.MULTILINE | re.UNICODE),
+ ["${{{match}}}"]
+ ),
(
re.compile(r"(?<=[\"\']\$)(.*?)(?=[\"\']$)", re.MULTILINE | re.UNICODE),
['"${match}"', "'${match}'"],
),
- (re.compile(r"\$(?!\d)(.*)", re.MULTILINE | re.UNICODE), ["{match}"]),
+ (
+ re.compile(r"\$(?!\d)(.*)(?<![\s\]])", re.MULTILINE | re.UNICODE),
+ ["{match}"]
+ ),
]
-__version__ = "1.1.201202"
+__version__ = "1.2.201222"
class EnvYAML:
@@ -138,8 +144,8 @@ class EnvYAML:
name, value = line.strip().split("=", 1) # type: str,str
# strip names and values
- name = name.strip().strip("'\"")
- value = value.strip().strip("'\"")
+ name = name.strip().strip("'\" ")
+ value = value.strip().strip("'\" ")
# set config
config[name] = value
|
thesimj/envyaml
|
7fa3fe20524bada64bad7db2008ee872a8ccde09
|
diff --git a/tests/env.test.yaml b/tests/env.test.yaml
index 156ae33..7f97a5c 100644
--- a/tests/env.test.yaml
+++ b/tests/env.test.yaml
@@ -77,6 +77,9 @@ empty:
novalues:
noenvvalue: $EMPTY_ENV|""
+var_in_array:
+ to: [ $USERNAME ]
+
#
# Comments
#
diff --git a/tests/test_envyaml.py b/tests/test_envyaml.py
index dba827a..b45941f 100644
--- a/tests/test_envyaml.py
+++ b/tests/test_envyaml.py
@@ -284,3 +284,10 @@ def test_it_should_return_proper_formatted_string():
def test_it_should_raise_exception_in_strict_mode():
with pytest.raises(ValueError):
EnvYAML("tests/env.ignored.yaml")
+
+
+def test_it_should_parser_environment_inside_array_and_object():
+ env = EnvYAML("tests/env.test.yaml", env_file="tests/test.env")
+
+ # assert array
+ assert env['var_in_array.to.0'] == 'env-username'
|
Parsing fails with inline list
I had the below structure in a config file, and the parsing fails.
```
mail:
to: [ $HANDLERS_SMTP_TO ]
```
I get the following error
`ValueError: Strict mode enabled, variable $SMTP_HANDLER_TO] not defined!`
The below works just fine though
```
mail:
to:
- $HANDLERS_SMTP_TO
```
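
For context, a quick check of the lookbehind added to the last `RE_ENV` pattern (patterns copied from the diff; the sample line is from this issue):

```python
import re

# Pattern before the fix: the greedy '.*' swallows the trailing ' ]' of an
# inline list, producing an undefined variable name.
old = re.compile(r"\$(?!\d)(.*)", re.MULTILINE | re.UNICODE)
# Patched pattern: the negative lookbehind forbids the match from ending on
# whitespace or ']'.
new = re.compile(r"\$(?!\d)(.*)(?<![\s\]])", re.MULTILINE | re.UNICODE)

line = "to: [ $HANDLERS_SMTP_TO ]"
print(old.search(line).group(1))  # 'HANDLERS_SMTP_TO ]'
print(new.search(line).group(1))  # 'HANDLERS_SMTP_TO'
```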
|
0.0
|
7fa3fe20524bada64bad7db2008ee872a8ccde09
|
[
"tests/test_envyaml.py::test_it_should_read_env_file",
"tests/test_envyaml.py::test_it_should_read_custom_file",
"tests/test_envyaml.py::test_it_should_get_default_values",
"tests/test_envyaml.py::test_it_should_raise_key_error_when_no_values",
"tests/test_envyaml.py::test_it_should_populate_env_variable",
"tests/test_envyaml.py::test_it_should_return_dict_on_export",
"tests/test_envyaml.py::test_it_should_convert_config_to_dict",
"tests/test_envyaml.py::test_it_should_access_all_keys_in_config",
"tests/test_envyaml.py::test_it_should_access_keys_and_lists",
"tests/test_envyaml.py::test_it_should_read_config_from_env_variable",
"tests/test_envyaml.py::test_it_should_use_default_value",
"tests/test_envyaml.py::test_it_should_get_lists_values_by_number",
"tests/test_envyaml.py::test_it_should_proper_handle_dollar_sign_with_number",
"tests/test_envyaml.py::test_it_should_proper_complex_variable",
"tests/test_envyaml.py::test_it_should_proper_complex_variable_2",
"tests/test_envyaml.py::test_it_should_return_proper_formatted_string",
"tests/test_envyaml.py::test_it_should_parser_environment_inside_array_and_object"
] |
[
"tests/test_envyaml.py::test_it_should_return_default_value",
"tests/test_envyaml.py::test_it_should_access_environment_variables",
"tests/test_envyaml.py::test_it_should_fail_when_access_environment_variables",
"tests/test_envyaml.py::test_it_should_access_environ",
"tests/test_envyaml.py::test_it_should_read_default_file",
"tests/test_envyaml.py::test_it_should_raise_exception_when_file_not_found",
"tests/test_envyaml.py::test_it_should_not_fail_when_try_load_non_exist_default_file",
"tests/test_envyaml.py::test_it_should_not_fail_when_try_load_default_empty_yaml_file",
"tests/test_envyaml.py::test_it_should_not_fail_when_try_load_default_empty_dotenv_file",
"tests/test_envyaml.py::test_it_should_be_valid_in_check",
"tests/test_envyaml.py::test_it_should_be_read_if_strict_disabled",
"tests/test_envyaml.py::test_it_should_raise_exception_in_strict_mode"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-12-22 21:23:55+00:00
|
mit
| 5,879 |
|
theskumar__python-dotenv-148
|
diff --git a/dotenv/main.py b/dotenv/main.py
index 349ec06..1a88238 100644
--- a/dotenv/main.py
+++ b/dotenv/main.py
@@ -2,47 +2,90 @@
from __future__ import absolute_import, print_function, unicode_literals
import codecs
-import fileinput
import io
import os
import re
+import shutil
import sys
from subprocess import Popen
+import tempfile
import warnings
-from collections import OrderedDict
+from collections import OrderedDict, namedtuple
+from contextlib import contextmanager
from .compat import StringIO, PY2, WIN, text_type
-__escape_decoder = codecs.getdecoder('unicode_escape')
-__posix_variable = re.compile('\$\{[^\}]*\}') # noqa
+__posix_variable = re.compile(r'\$\{[^\}]*\}')
+_binding = re.compile(
+ r"""
+ (
+ \s* # leading whitespace
+ (?:export\s+)? # export
-def decode_escaped(escaped):
- return __escape_decoder(escaped)[0]
+ ( '[^']+' # single-quoted key
+ | [^=\#\s]+ # or unquoted key
+ )?
+ (?:
+ (?:\s*=\s*) # equal sign
-def parse_line(line):
- line = line.strip()
+ ( '(?:\\'|[^'])*' # single-quoted value
+ | "(?:\\"|[^"])*" # or double-quoted value
+ | [^\#\r\n]* # or unquoted value
+ )
+ )?
- # Ignore lines with `#` or which doesn't have `=` in it.
- if not line or line.startswith('#') or '=' not in line:
- return None, None
+ \s* # trailing whitespace
+ (?:\#[^\r\n]*)? # comment
+ (?:\r|\n|\r\n)? # newline
+ )
+ """,
+ re.MULTILINE | re.VERBOSE,
+)
- k, v = line.split('=', 1)
+_escape_sequence = re.compile(r"\\[\\'\"abfnrtv]")
- if k.startswith('export '):
- (_, _, k) = k.partition('export ')
- # Remove any leading and trailing spaces in key, value
- k, v = k.strip(), v.strip()
+Binding = namedtuple('Binding', 'key value original')
- if v:
- v = v.encode('unicode-escape').decode('ascii')
- quoted = v[0] == v[-1] in ['"', "'"]
- if quoted:
- v = decode_escaped(v[1:-1])
- return k, v
+def decode_escapes(string):
+ def decode_match(match):
+ return codecs.decode(match.group(0), 'unicode-escape')
+
+ return _escape_sequence.sub(decode_match, string)
+
+
+def is_surrounded_by(string, char):
+ return (
+ len(string) > 1
+ and string[0] == string[-1] == char
+ )
+
+
+def parse_binding(string, position):
+ match = _binding.match(string, position)
+ (matched, key, value) = match.groups()
+ if key is None or value is None:
+ key = None
+ value = None
+ else:
+ value_quoted = is_surrounded_by(value, "'") or is_surrounded_by(value, '"')
+ if value_quoted:
+ value = decode_escapes(value[1:-1])
+ else:
+ value = value.strip()
+ return (Binding(key=key, value=value, original=matched), match.end())
+
+
+def parse_stream(stream):
+ string = stream.read()
+ position = 0
+ length = len(string)
+ while position < length:
+ (binding, position) = parse_binding(string, position)
+ yield binding
class DotEnv():
@@ -52,19 +95,17 @@ class DotEnv():
self._dict = None
self.verbose = verbose
+ @contextmanager
def _get_stream(self):
- self._is_file = False
if isinstance(self.dotenv_path, StringIO):
- return self.dotenv_path
-
- if os.path.isfile(self.dotenv_path):
- self._is_file = True
- return io.open(self.dotenv_path)
-
- if self.verbose:
- warnings.warn("File doesn't exist {}".format(self.dotenv_path))
-
- return StringIO('')
+ yield self.dotenv_path
+ elif os.path.isfile(self.dotenv_path):
+ with io.open(self.dotenv_path) as stream:
+ yield stream
+ else:
+ if self.verbose:
+ warnings.warn("File doesn't exist {}".format(self.dotenv_path))
+ yield StringIO('')
def dict(self):
"""Return dotenv as dict"""
@@ -76,17 +117,10 @@ class DotEnv():
return self._dict
def parse(self):
- f = self._get_stream()
-
- for line in f:
- key, value = parse_line(line)
- if not key:
- continue
-
- yield key, value
-
- if self._is_file:
- f.close()
+ with self._get_stream() as stream:
+ for mapping in parse_stream(stream):
+ if mapping.key is not None and mapping.value is not None:
+ yield mapping.key, mapping.value
def set_as_environment_variables(self, override=False):
"""
@@ -126,6 +160,20 @@ def get_key(dotenv_path, key_to_get):
return DotEnv(dotenv_path, verbose=True).get(key_to_get)
+@contextmanager
+def rewrite(path):
+ try:
+ with tempfile.NamedTemporaryFile(mode="w+", delete=False) as dest:
+ with io.open(path) as source:
+ yield (source, dest)
+ except BaseException:
+ if os.path.isfile(dest.name):
+ os.unlink(dest.name)
+ raise
+ else:
+ shutil.move(dest.name, path)
+
+
def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"):
"""
Adds or Updates a key/value to the given .env
@@ -141,20 +189,19 @@ def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"):
if " " in value_to_set:
quote_mode = "always"
- line_template = '{}="{}"' if quote_mode == "always" else '{}={}'
+ line_template = '{}="{}"\n' if quote_mode == "always" else '{}={}\n'
line_out = line_template.format(key_to_set, value_to_set)
- replaced = False
- for line in fileinput.input(dotenv_path, inplace=True):
- k, v = parse_line(line)
- if k == key_to_set:
- replaced = True
- line = "{}\n".format(line_out)
- print(line, end='')
-
- if not replaced:
- with io.open(dotenv_path, "a") as f:
- f.write("{}\n".format(line_out))
+ with rewrite(dotenv_path) as (source, dest):
+ replaced = False
+ for mapping in parse_stream(source):
+ if mapping.key == key_to_set:
+ dest.write(line_out)
+ replaced = True
+ else:
+ dest.write(mapping.original)
+ if not replaced:
+ dest.write(line_out)
return True, key_to_set, value_to_set
@@ -166,18 +213,17 @@ def unset_key(dotenv_path, key_to_unset, quote_mode="always"):
If the .env path given doesn't exist, fails
If the given key doesn't exist in the .env, fails
"""
- removed = False
-
if not os.path.exists(dotenv_path):
warnings.warn("can't delete from %s - it doesn't exist." % dotenv_path)
return None, key_to_unset
- for line in fileinput.input(dotenv_path, inplace=True):
- k, v = parse_line(line)
- if k == key_to_unset:
- removed = True
- line = ''
- print(line, end='')
+ removed = False
+ with rewrite(dotenv_path) as (source, dest):
+ for mapping in parse_stream(source):
+ if mapping.key == key_to_unset:
+ removed = True
+ else:
+ dest.write(mapping.original)
if not removed:
warnings.warn("key %s not removed from %s - key doesn't exist." % (key_to_unset, dotenv_path))
|
theskumar/python-dotenv
|
3b7e60e6cbdef596701c1921257ea9e48076eec3
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 15c47af..b594592 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,12 +1,13 @@
# -*- coding: utf-8 -*-
-from os import environ
+import os
from os.path import dirname, join
+import pytest
+import sh
+
import dotenv
-from dotenv.version import __version__
from dotenv.cli import cli as dotenv_cli
-
-import sh
+from dotenv.version import __version__
here = dirname(__file__)
dotenv_path = join(here, '.env')
@@ -38,6 +39,22 @@ def test_set_key(dotenv_file):
with open(dotenv_file, 'r') as fp:
assert 'HELLO="WORLD 2"\nfoo="bar"' == fp.read().strip()
+ success, key_to_set, value_to_set = dotenv.set_key(dotenv_file, "HELLO", "WORLD\n3")
+
+ with open(dotenv_file, "r") as fp:
+ assert 'HELLO="WORLD\n3"\nfoo="bar"' == fp.read().strip()
+
+
+def test_set_key_permission_error(dotenv_file):
+ os.chmod(dotenv_file, 0o000)
+
+ with pytest.raises(Exception):
+ dotenv.set_key(dotenv_file, "HELLO", "WORLD")
+
+ os.chmod(dotenv_file, 0o600)
+ with open(dotenv_file, "r") as fp:
+ assert fp.read() == ""
+
def test_list(cli, dotenv_file):
success, key_to_set, value_to_set = dotenv.set_key(dotenv_file, 'HELLO', 'WORLD')
@@ -59,6 +76,13 @@ def test_list_wo_file(cli):
assert 'Invalid value for "-f"' in result.output
+def test_empty_value():
+ with open(dotenv_path, "w") as f:
+ f.write("TEST=")
+ assert dotenv.get_key(dotenv_path, "TEST") == ""
+ sh.rm(dotenv_path)
+
+
def test_key_value_without_quotes():
with open(dotenv_path, 'w') as f:
f.write("TEST = value \n")
@@ -95,18 +119,41 @@ def test_value_with_special_characters():
sh.rm(dotenv_path)
-def test_unset():
- sh.touch(dotenv_path)
- success, key_to_set, value_to_set = dotenv.set_key(dotenv_path, 'HELLO', 'WORLD')
- stored_value = dotenv.get_key(dotenv_path, 'HELLO')
- assert stored_value == 'WORLD'
- success, key_to_unset = dotenv.unset_key(dotenv_path, 'HELLO')
- assert success is True
- assert dotenv.get_key(dotenv_path, 'HELLO') is None
- success, key_to_unset = dotenv.unset_key(dotenv_path, 'RANDOM')
- assert success is None
+def test_value_with_new_lines():
+ with open(dotenv_path, 'w') as f:
+ f.write('TEST="a\nb"')
+ assert dotenv.get_key(dotenv_path, 'TEST') == "a\nb"
+ sh.rm(dotenv_path)
+
+ with open(dotenv_path, 'w') as f:
+ f.write("TEST='a\nb'")
+ assert dotenv.get_key(dotenv_path, 'TEST') == "a\nb"
+ sh.rm(dotenv_path)
+
+
+def test_value_after_comment():
+ with open(dotenv_path, "w") as f:
+ f.write("# comment\nTEST=a")
+ assert dotenv.get_key(dotenv_path, "TEST") == "a"
sh.rm(dotenv_path)
- success, key_to_unset = dotenv.unset_key(dotenv_path, 'HELLO')
+
+
+def test_unset_ok(dotenv_file):
+ with open(dotenv_file, "w") as f:
+ f.write("a=b\nc=d")
+
+ success, key_to_unset = dotenv.unset_key(dotenv_file, "a")
+
+ assert success is True
+ assert key_to_unset == "a"
+ with open(dotenv_file, "r") as f:
+ assert f.read() == "c=d"
+ sh.rm(dotenv_file)
+
+
+def test_unset_non_existing_file():
+ success, key_to_unset = dotenv.unset_key('/non-existing', 'HELLO')
+
assert success is None
@@ -180,7 +227,7 @@ def test_get_key_with_interpolation(cli):
stored_value = dotenv.get_key(dotenv_path, 'BAR')
assert stored_value == 'CONCATENATED_WORLD_POSIX_VAR'
# test replace from environ taking precedence over file
- environ["HELLO"] = "TAKES_PRECEDENCE"
+ os.environ["HELLO"] = "TAKES_PRECEDENCE"
stored_value = dotenv.get_key(dotenv_path, 'FOO')
assert stored_value == "TAKES_PRECEDENCE"
sh.rm(dotenv_path)
@@ -194,10 +241,10 @@ def test_get_key_with_interpolation_of_unset_variable(cli):
stored_value = dotenv.get_key(dotenv_path, 'FOO')
assert stored_value == ''
# unless present in environment
- environ['NOT_SET'] = 'BAR'
+ os.environ['NOT_SET'] = 'BAR'
stored_value = dotenv.get_key(dotenv_path, 'FOO')
assert stored_value == 'BAR'
- del(environ['NOT_SET'])
+ del(os.environ['NOT_SET'])
sh.rm(dotenv_path)
diff --git a/tests/test_core.py b/tests/test_core.py
index 45a1f86..bda2e3b 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -9,7 +9,7 @@ import warnings
import sh
from dotenv import load_dotenv, find_dotenv, set_key, dotenv_values
-from dotenv.main import parse_line
+from dotenv.main import Binding, parse_stream
from dotenv.compat import StringIO
from IPython.terminal.embed import InteractiveShellEmbed
@@ -25,21 +25,71 @@ def restore_os_environ():
@pytest.mark.parametrize("test_input,expected", [
- ("a=b", ("a", "b")),
- (" a = b ", ("a", "b")),
- ("export a=b", ("a", "b")),
- (" export 'a'=b", ("'a'", "b")),
- (" export 'a'=b", ("'a'", "b")),
- ("# a=b", (None, None)),
- ("# a=b", (None, None)),
- ("a=b space ", ('a', 'b space')),
- ("a='b space '", ('a', 'b space ')),
- ('a="b space "', ('a', 'b space ')),
- ("export export_spam=1", ("export_spam", "1")),
- ("export port=8000", ("port", "8000")),
+ ("", []),
+ ("a=b", [Binding(key="a", value="b", original="a=b")]),
+ ("'a'=b", [Binding(key="'a'", value="b", original="'a'=b")]),
+ ("[=b", [Binding(key="[", value="b", original="[=b")]),
+ (" a = b ", [Binding(key="a", value="b", original=" a = b ")]),
+ ("export a=b", [Binding(key="a", value="b", original="export a=b")]),
+ (" export 'a'=b", [Binding(key="'a'", value="b", original=" export 'a'=b")]),
+ (" export 'a'=b", [Binding(key="'a'", value="b", original=" export 'a'=b")]),
+ ("# a=b", [Binding(key=None, value=None, original="# a=b")]),
+ ('a=b # comment', [Binding(key="a", value="b", original="a=b # comment")]),
+ ("a=b space ", [Binding(key="a", value="b space", original="a=b space ")]),
+ ("a='b space '", [Binding(key="a", value="b space ", original="a='b space '")]),
+ ('a="b space "', [Binding(key="a", value="b space ", original='a="b space "')]),
+ ("export export_a=1", [Binding(key="export_a", value="1", original="export export_a=1")]),
+ ("export port=8000", [Binding(key="port", value="8000", original="export port=8000")]),
+ ('a="b\nc"', [Binding(key="a", value="b\nc", original='a="b\nc"')]),
+ ("a='b\nc'", [Binding(key="a", value="b\nc", original="a='b\nc'")]),
+ ('a="b\nc"', [Binding(key="a", value="b\nc", original='a="b\nc"')]),
+ ('a="b\\nc"', [Binding(key="a", value='b\nc', original='a="b\\nc"')]),
+ ('a="b\\"c"', [Binding(key="a", value='b"c', original='a="b\\"c"')]),
+ ("a='b\\'c'", [Binding(key="a", value="b'c", original="a='b\\'c'")]),
+ ("a=à", [Binding(key="a", value="à", original="a=à")]),
+ ('a="à"', [Binding(key="a", value="à", original='a="à"')]),
+ ('garbage', [Binding(key=None, value=None, original="garbage")]),
+ (
+ "a=b\nc=d",
+ [
+ Binding(key="a", value="b", original="a=b\n"),
+ Binding(key="c", value="d", original="c=d"),
+ ],
+ ),
+ (
+ "a=b\r\nc=d",
+ [
+ Binding(key="a", value="b", original="a=b\r\n"),
+ Binding(key="c", value="d", original="c=d"),
+ ],
+ ),
+ (
+ 'a="\nb=c',
+ [
+ Binding(key="a", value='"', original='a="\n'),
+ Binding(key="b", value='c', original="b=c"),
+ ]
+ ),
+ (
+ '# comment\na="b\nc"\nd=e\n',
+ [
+ Binding(key=None, value=None, original="# comment\n"),
+ Binding(key="a", value="b\nc", original='a="b\nc"\n'),
+ Binding(key="d", value="e", original="d=e\n"),
+ ],
+ ),
+ (
+ 'garbage[%$#\na=b',
+ [
+ Binding(key=None, value=None, original="garbage[%$#\n"),
+ Binding(key="a", value="b", original='a=b'),
+ ],
+ ),
])
-def test_parse_line(test_input, expected):
- assert parse_line(test_input) == expected
+def test_parse_stream(test_input, expected):
+ result = parse_stream(StringIO(test_input))
+
+ assert list(result) == expected
def test_warns_if_file_does_not_exist():
|
Cannot get multiline strings to work?
According to the documentation, `TEST="foo\nbar"` should produce a multiline environment variable; however, I cannot get it to work. What am I doing wrong? I have tried every variation I can think of.
**.env**
```
TEST1="foo\nbar"
TEST2=foo\nbar
TEST3="foo
bar"
TEST4="foo\\nbar"
TEST5=foo\\nbar
TEST6=foo
bar
TEST7="foo\
bar"
TEST8=foo\
bar
```
**test.py**
```
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path, verbose=True)
tests = (
(key, value)
for key, value in os.environ.items()
if key.startswith('TEST'))
for key, value in tests:
print(key, value)
```
**output**
```
TEST1 foo\nbar
TEST2 foo\\nbar
TEST3 "foo
TEST4 foo\\nbar
TEST5 foo\\\\nbar
TEST6 foo
TEST7 "foo\\
TEST8 foo\\
```
Trying with "real" environment variables through bash:
```
$ export TEST="foo
bar"
$ python -c "import os; print(os.environ['TEST'])"
foo
bar
```
**using**
Darwin Kernel Version 15.6.0
Python 3.6.2
python-dotenv==0.7.1
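
For reference, a sketch of how the parser introduced by this patch handles the `TEST1` and `TEST3` cases (module path as in the test diff; not part of the original report):

```python
from io import StringIO

from dotenv.main import parse_stream  # location per this patch

# TEST1-style input: the \n escape inside double quotes is decoded into a
# real newline by the new parser.
binding, = parse_stream(StringIO('TEST="foo\\nbar"'))
print(repr(binding.value))  # 'foo\nbar'

# TEST3-style input: a literal newline inside double quotes now parses too,
# instead of the value stopping at the opening quote.
binding, = parse_stream(StringIO('TEST="foo\nbar"'))
print(repr(binding.value))  # 'foo\nbar'
```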
|
0.0
|
3b7e60e6cbdef596701c1921257ea9e48076eec3
|
[
"tests/test_cli.py::test_get_key",
"tests/test_cli.py::test_set_key",
"tests/test_cli.py::test_list",
"tests/test_cli.py::test_get_cli",
"tests/test_cli.py::test_empty_value",
"tests/test_cli.py::test_key_value_without_quotes",
"tests/test_cli.py::test_value_with_quotes",
"tests/test_cli.py::test_value_with_special_characters",
"tests/test_cli.py::test_value_with_new_lines",
"tests/test_cli.py::test_value_after_comment",
"tests/test_cli.py::test_unset_ok",
"tests/test_cli.py::test_unset_non_existing_file",
"tests/test_cli.py::test_unset_cli",
"tests/test_cli.py::test_get_key_with_interpolation",
"tests/test_cli.py::test_get_key_with_interpolation_of_unset_variable",
"tests/test_cli.py::test_run_without_cmd",
"tests/test_cli.py::test_run_with_invalid_cmd",
"tests/test_cli.py::test_run_with_version",
"tests/test_core.py::test_parse_stream[-expected0]",
"tests/test_core.py::test_parse_stream[a=b-expected1]",
"tests/test_core.py::test_parse_stream['a'=b-expected2]",
"tests/test_core.py::test_parse_stream[[=b-expected3]",
"tests/test_core.py::test_parse_stream[",
"tests/test_core.py::test_parse_stream[export",
"tests/test_core.py::test_parse_stream[#",
"tests/test_core.py::test_parse_stream[a=b",
"tests/test_core.py::test_parse_stream[a='b",
"tests/test_core.py::test_parse_stream[a=\"b",
"tests/test_core.py::test_parse_stream[a=\"b\\nc\"-expected15]",
"tests/test_core.py::test_parse_stream[a='b\\nc'-expected16]",
"tests/test_core.py::test_parse_stream[a=\"b\\nc\"-expected17]",
"tests/test_core.py::test_parse_stream[a=\"b\\\\nc\"-expected18]",
"tests/test_core.py::test_parse_stream[a=\"b\\\\\"c\"-expected19]",
"tests/test_core.py::test_parse_stream[a='b\\\\'c'-expected20]",
"tests/test_core.py::test_parse_stream[a=\\xe0-expected21]",
"tests/test_core.py::test_parse_stream[a=\"\\xe0\"-expected22]",
"tests/test_core.py::test_parse_stream[garbage-expected23]",
"tests/test_core.py::test_parse_stream[a=b\\nc=d-expected24]",
"tests/test_core.py::test_parse_stream[a=b\\r\\nc=d-expected25]",
"tests/test_core.py::test_parse_stream[a=\"\\nb=c-expected26]",
"tests/test_core.py::test_parse_stream[garbage[%$#\\na=b-expected28]",
"tests/test_core.py::test_warns_if_file_does_not_exist",
"tests/test_core.py::test_find_dotenv",
"tests/test_core.py::test_load_dotenv",
"tests/test_core.py::test_load_dotenv_override",
"tests/test_core.py::test_load_dotenv_in_current_dir",
"tests/test_core.py::test_ipython",
"tests/test_core.py::test_ipython_override",
"tests/test_core.py::test_dotenv_values_stream",
"tests/test_core.py::test_dotenv_values_export",
"tests/test_core.py::test_dotenv_empty_selfreferential_interpolation",
"tests/test_core.py::test_dotenv_nonempty_selfreferential_interpolation"
] |
[] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-28 14:54:09+00:00
|
bsd-3-clause
| 5,880 |
|
theskumar__python-dotenv-158
|
diff --git a/dotenv/main.py b/dotenv/main.py
index 1a88238..98b22ec 100644
--- a/dotenv/main.py
+++ b/dotenv/main.py
@@ -21,14 +21,14 @@ _binding = re.compile(
r"""
(
\s* # leading whitespace
- (?:export\s+)? # export
+ (?:export{0}+)? # export
( '[^']+' # single-quoted key
| [^=\#\s]+ # or unquoted key
)?
(?:
- (?:\s*=\s*) # equal sign
+ (?:{0}*={0}*) # equal sign
( '(?:\\'|[^'])*' # single-quoted value
| "(?:\\"|[^"])*" # or double-quoted value
@@ -40,7 +40,7 @@ _binding = re.compile(
(?:\#[^\r\n]*)? # comment
(?:\r|\n|\r\n)? # newline
)
- """,
+ """.format(r'[^\S\r\n]'),
re.MULTILINE | re.VERBOSE,
)
|
theskumar/python-dotenv
|
3daef30cbb392d0b57e70cf2e28814496c3cf5e9
|
diff --git a/tests/test_core.py b/tests/test_core.py
index bda2e3b..f797600 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -63,6 +63,13 @@ def restore_os_environ():
Binding(key="c", value="d", original="c=d"),
],
),
+ (
+ 'a=\nb=c',
+ [
+ Binding(key="a", value='', original='a=\n'),
+ Binding(key="b", value='c', original="b=c"),
+ ]
+ ),
(
'a="\nb=c',
[
|
Regression 0.9.1 -> 0.10.0: .env parsed incorrectly
Hello!
I have upgraded dotenv from 0.9.1 to 0.10 and now my config file is parsed incorrectly.
Example file:
```
$ cat .env
VAR_A=
VAR_B=123
```
Current behaviour on 0.10:
```
$ dotenv list
VAR_A=VAR_B=123
```
Previous (correct) behaviour on 0.9:
```
$ dotenv list
VAR_A=
VAR_B=123
```
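
The patch swaps `\s` for `[^\S\r\n]` around the equal sign. `\s` also matches newlines, which let the old pattern run past `VAR_A=` onto the next line; a minimal repro of the difference (regex fragments copied from the diff):

```python
import re

# Old: '\s*' after '=' also matches the line break, so parsing of 'VAR_A='
# continued onto the 'VAR_B=123' line.
print(repr(re.match(r"\s*=\s*", "=\nVAR_B=123").group()))  # '=\n'

# New: '[^\S\r\n]' is horizontal whitespace only, so the match stops at '='.
h = r"[^\S\r\n]"
print(repr(re.match(h + "*=" + h + "*", "=\nVAR_B=123").group()))  # '='
```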
|
0.0
|
3daef30cbb392d0b57e70cf2e28814496c3cf5e9
|
[
"tests/test_core.py::test_parse_stream[a=\\nb=c-expected26]"
] |
[
"tests/test_core.py::test_parse_stream[-expected0]",
"tests/test_core.py::test_parse_stream[a=b-expected1]",
"tests/test_core.py::test_parse_stream['a'=b-expected2]",
"tests/test_core.py::test_parse_stream[[=b-expected3]",
"tests/test_core.py::test_parse_stream[",
"tests/test_core.py::test_parse_stream[export",
"tests/test_core.py::test_parse_stream[#",
"tests/test_core.py::test_parse_stream[a=b",
"tests/test_core.py::test_parse_stream[a='b",
"tests/test_core.py::test_parse_stream[a=\"b",
"tests/test_core.py::test_parse_stream[a=\"b\\nc\"-expected15]",
"tests/test_core.py::test_parse_stream[a='b\\nc'-expected16]",
"tests/test_core.py::test_parse_stream[a=\"b\\nc\"-expected17]",
"tests/test_core.py::test_parse_stream[a=\"b\\\\nc\"-expected18]",
"tests/test_core.py::test_parse_stream[a=\"b\\\\\"c\"-expected19]",
"tests/test_core.py::test_parse_stream[a='b\\\\'c'-expected20]",
"tests/test_core.py::test_parse_stream[a=\\xe0-expected21]",
"tests/test_core.py::test_parse_stream[a=\"\\xe0\"-expected22]",
"tests/test_core.py::test_parse_stream[garbage-expected23]",
"tests/test_core.py::test_parse_stream[a=b\\nc=d-expected24]",
"tests/test_core.py::test_parse_stream[a=b\\r\\nc=d-expected25]",
"tests/test_core.py::test_parse_stream[a=\"\\nb=c-expected27]",
"tests/test_core.py::test_parse_stream[garbage[%$#\\na=b-expected29]",
"tests/test_core.py::test_warns_if_file_does_not_exist",
"tests/test_core.py::test_find_dotenv",
"tests/test_core.py::test_load_dotenv",
"tests/test_core.py::test_load_dotenv_override",
"tests/test_core.py::test_load_dotenv_in_current_dir",
"tests/test_core.py::test_ipython",
"tests/test_core.py::test_ipython_override",
"tests/test_core.py::test_dotenv_values_stream",
"tests/test_core.py::test_dotenv_values_export",
"tests/test_core.py::test_dotenv_empty_selfreferential_interpolation",
"tests/test_core.py::test_dotenv_nonempty_selfreferential_interpolation"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2018-12-14 08:36:38+00:00
|
bsd-3-clause
| 5,881 |
|
theskumar__python-dotenv-180
|
diff --git a/README.md b/README.md
index 64fa64f..7ca6477 100644
--- a/README.md
+++ b/README.md
@@ -299,8 +299,9 @@ Changelog
Unreleased
-----
-- ...
-
+- Refactor parser to fix parsing inconsistencies ([@bbc2])([#170]).
+ - Interpret escapes as control characters only in double-quoted strings.
+ - Interpret `#` as start of comment only if preceded by whitespace.
0.10.2
-----
@@ -428,6 +429,7 @@ Unreleased
[#172]: https://github.com/theskumar/python-dotenv/issues/172
[#121]: https://github.com/theskumar/python-dotenv/issues/121
[#176]: https://github.com/theskumar/python-dotenv/issues/176
+[#170]: https://github.com/theskumar/python-dotenv/issues/170
[@asyncee]: https://github.com/asyncee
[@greyli]: https://github.com/greyli
diff --git a/setup.cfg b/setup.cfg
index 7f78459..f0847b3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,6 +5,9 @@ universal = 1
max-line-length = 120
exclude = .tox,.git,docs,venv,.venv
+[mypy]
+ignore_missing_imports = true
+
[metadata]
description-file = README.rst
diff --git a/src/dotenv/compat.py b/src/dotenv/compat.py
index 99ffb39..1a14534 100644
--- a/src/dotenv/compat.py
+++ b/src/dotenv/compat.py
@@ -1,4 +1,5 @@
import sys
+from typing import Text
if sys.version_info >= (3, 0):
from io import StringIO # noqa
@@ -6,3 +7,17 @@ else:
from StringIO import StringIO # noqa
PY2 = sys.version_info[0] == 2 # type: bool
+
+
+def to_text(string):
+ # type: (str) -> Text
+ """
+ Make a string Unicode if it isn't already.
+
+ This is useful for defining raw unicode strings because `ur"foo"` isn't valid in
+ Python 3.
+ """
+ if PY2:
+ return string.decode("utf-8")
+ else:
+ return string
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index 0812282..5b619b1 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
-import codecs
import io
import os
import re
@@ -9,13 +8,14 @@ import shutil
import sys
from subprocess import Popen
import tempfile
-from typing import (Any, Dict, Iterator, List, Match, NamedTuple, Optional, # noqa
- Pattern, Union, TYPE_CHECKING, Text, IO, Tuple) # noqa
+from typing import (Dict, Iterator, List, Match, Optional, # noqa
+ Pattern, Union, TYPE_CHECKING, Text, IO, Tuple)
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from .compat import StringIO, PY2
+from .parser import parse_stream
if TYPE_CHECKING: # pragma: no cover
if sys.version_info >= (3, 6):
@@ -30,84 +30,6 @@ if TYPE_CHECKING: # pragma: no cover
__posix_variable = re.compile(r'\$\{[^\}]*\}') # type: Pattern[Text]
-_binding = re.compile(
- r"""
- (
- \s* # leading whitespace
- (?:export{0}+)? # export
-
- ( '[^']+' # single-quoted key
- | [^=\#\s]+ # or unquoted key
- )?
-
- (?:
- (?:{0}*={0}*) # equal sign
-
- ( '(?:\\'|[^'])*' # single-quoted value
- | "(?:\\"|[^"])*" # or double-quoted value
- | [^\#\r\n]* # or unquoted value
- )
- )?
-
- \s* # trailing whitespace
- (?:\#[^\r\n]*)? # comment
- (?:\r|\n|\r\n)? # newline
- )
- """.format(r'[^\S\r\n]'),
- re.MULTILINE | re.VERBOSE,
-) # type: Pattern[Text]
-
-_escape_sequence = re.compile(r"\\[\\'\"abfnrtv]") # type: Pattern[Text]
-
-
-Binding = NamedTuple("Binding", [("key", Optional[Text]),
- ("value", Optional[Text]),
- ("original", Text)])
-
-
-def decode_escapes(string):
- # type: (Text) -> Text
- def decode_match(match):
- # type: (Match[Text]) -> Text
- return codecs.decode(match.group(0), 'unicode-escape') # type: ignore
-
- return _escape_sequence.sub(decode_match, string)
-
-
-def is_surrounded_by(string, char):
- # type: (Text, Text) -> bool
- return (
- len(string) > 1
- and string[0] == string[-1] == char
- )
-
-
-def parse_binding(string, position):
- # type: (Text, int) -> Tuple[Binding, int]
- match = _binding.match(string, position)
- assert match is not None
- (matched, key, value) = match.groups()
- if key is None or value is None:
- key = None
- value = None
- else:
- value_quoted = is_surrounded_by(value, "'") or is_surrounded_by(value, '"')
- if value_quoted:
- value = decode_escapes(value[1:-1])
- else:
- value = value.strip()
- return (Binding(key=key, value=value, original=matched), match.end())
-
-
-def parse_stream(stream):
- # type:(IO[Text]) -> Iterator[Binding]
- string = stream.read()
- position = 0
- length = len(string)
- while position < length:
- (binding, position) = parse_binding(string, position)
- yield binding
-
def to_env(text):
# type: (Text) -> str
diff --git a/src/dotenv/parser.py b/src/dotenv/parser.py
new file mode 100644
index 0000000..b63cb3a
--- /dev/null
+++ b/src/dotenv/parser.py
@@ -0,0 +1,147 @@
+import codecs
+import re
+from typing import (IO, Iterator, Match, NamedTuple, Optional, Pattern, # noqa
+ Sequence, Text)
+
+from .compat import to_text
+
+
+def make_regex(string, extra_flags=0):
+ # type: (str, int) -> Pattern[Text]
+ return re.compile(to_text(string), re.UNICODE | extra_flags)
+
+
+_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE)
+_export = make_regex(r"(?:export[^\S\r\n]+)?")
+_single_quoted_key = make_regex(r"'([^']+)'")
+_unquoted_key = make_regex(r"([^=\#\s]+)")
+_equal_sign = make_regex(r"[^\S\r\n]*=[^\S\r\n]*")
+_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'")
+_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"')
+_unquoted_value_part = make_regex(r"([^ \r\n]*)")
+_comment = make_regex(r"(?:\s*#[^\r\n]*)?")
+_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r)?")
+_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?")
+_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]")
+_single_quote_escapes = make_regex(r"\\[\\']")
+
+Binding = NamedTuple("Binding", [("key", Optional[Text]),
+ ("value", Optional[Text]),
+ ("original", Text)])
+
+
+class Error(Exception):
+ pass
+
+
+class Reader:
+ def __init__(self, stream):
+ # type: (IO[Text]) -> None
+ self.string = stream.read()
+ self.position = 0
+ self.mark = 0
+
+ def has_next(self):
+ # type: () -> bool
+ return self.position < len(self.string)
+
+ def set_mark(self):
+ # type: () -> None
+ self.mark = self.position
+
+ def get_marked(self):
+ # type: () -> Text
+ return self.string[self.mark:self.position]
+
+ def peek(self, count):
+ # type: (int) -> Text
+ return self.string[self.position:self.position + count]
+
+ def read(self, count):
+ # type: (int) -> Text
+ result = self.string[self.position:self.position + count]
+ if len(result) < count:
+ raise Error("read: End of string")
+ self.position += count
+ return result
+
+ def read_regex(self, regex):
+ # type: (Pattern[Text]) -> Sequence[Text]
+ match = regex.match(self.string, self.position)
+ if match is None:
+ raise Error("read_regex: Pattern not found")
+ self.position = match.end()
+ return match.groups()
+
+
+def decode_escapes(regex, string):
+ # type: (Pattern[Text], Text) -> Text
+ def decode_match(match):
+ # type: (Match[Text]) -> Text
+ return codecs.decode(match.group(0), 'unicode-escape') # type: ignore
+
+ return regex.sub(decode_match, string)
+
+
+def parse_key(reader):
+ # type: (Reader) -> Text
+ char = reader.peek(1)
+ if char == "'":
+ (key,) = reader.read_regex(_single_quoted_key)
+ else:
+ (key,) = reader.read_regex(_unquoted_key)
+ return key
+
+
+def parse_unquoted_value(reader):
+ # type: (Reader) -> Text
+ value = u""
+ while True:
+ (part,) = reader.read_regex(_unquoted_value_part)
+ value += part
+ after = reader.peek(2)
+ if len(after) < 2 or after[0] in u"\r\n" or after[1] in u" #\r\n":
+ return value
+ value += reader.read(2)
+
+
+def parse_value(reader):
+ # type: (Reader) -> Text
+ char = reader.peek(1)
+ if char == u"'":
+ (value,) = reader.read_regex(_single_quoted_value)
+ return decode_escapes(_single_quote_escapes, value)
+ elif char == u'"':
+ (value,) = reader.read_regex(_double_quoted_value)
+ return decode_escapes(_double_quote_escapes, value)
+ elif char in (u"", u"\n", u"\r"):
+ return u""
+ else:
+ return parse_unquoted_value(reader)
+
+
+def parse_binding(reader):
+ # type: (Reader) -> Binding
+ reader.set_mark()
+ try:
+ reader.read_regex(_whitespace)
+ reader.read_regex(_export)
+ key = parse_key(reader)
+ reader.read_regex(_equal_sign)
+ value = parse_value(reader)
+ reader.read_regex(_comment)
+ reader.read_regex(_end_of_line)
+ return Binding(key=key, value=value, original=reader.get_marked())
+ except Error:
+ reader.read_regex(_rest_of_line)
+ return Binding(key=None, value=None, original=reader.get_marked())
+
+
+def parse_stream(stream):
+ # type:(IO[Text]) -> Iterator[Binding]
+ reader = Reader(stream)
+ while reader.has_next():
+ try:
+ yield parse_binding(reader)
+ except Error:
+ return
diff --git a/tox.ini b/tox.ini
index 56c8732..077780f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,11 +19,11 @@ deps =
mypy
commands =
flake8 src tests
- mypy --python-version=3.7 src
- mypy --python-version=3.6 src
- mypy --python-version=3.5 src
- mypy --python-version=3.4 src
- mypy --python-version=2.7 src
+ mypy --python-version=3.7 src tests
+ mypy --python-version=3.6 src tests
+ mypy --python-version=3.5 src tests
+ mypy --python-version=3.4 src tests
+ mypy --python-version=2.7 src tests
[testenv:manifest]
deps = check-manifest
|
theskumar/python-dotenv
|
73124de45fcc21010ed9e90d4e5f576ba018496b
|
diff --git a/tests/test_core.py b/tests/test_core.py
index daf0f59..349c58b 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -13,7 +13,6 @@ from IPython.terminal.embed import InteractiveShellEmbed
from dotenv import dotenv_values, find_dotenv, load_dotenv, set_key
from dotenv.compat import PY2, StringIO
-from dotenv.main import Binding, parse_stream
@contextlib.contextmanager
@@ -26,81 +25,6 @@ def restore_os_environ():
os.environ.update(environ)
[email protected]("test_input,expected", [
- ("", []),
- ("a=b", [Binding(key="a", value="b", original="a=b")]),
- ("'a'=b", [Binding(key="'a'", value="b", original="'a'=b")]),
- ("[=b", [Binding(key="[", value="b", original="[=b")]),
- (" a = b ", [Binding(key="a", value="b", original=" a = b ")]),
- ("export a=b", [Binding(key="a", value="b", original="export a=b")]),
- (" export 'a'=b", [Binding(key="'a'", value="b", original=" export 'a'=b")]),
- (" export 'a'=b", [Binding(key="'a'", value="b", original=" export 'a'=b")]),
- ("# a=b", [Binding(key=None, value=None, original="# a=b")]),
- ('a=b # comment', [Binding(key="a", value="b", original="a=b # comment")]),
- ("a=b space ", [Binding(key="a", value="b space", original="a=b space ")]),
- ("a='b space '", [Binding(key="a", value="b space ", original="a='b space '")]),
- ('a="b space "', [Binding(key="a", value="b space ", original='a="b space "')]),
- ("export export_a=1", [Binding(key="export_a", value="1", original="export export_a=1")]),
- ("export port=8000", [Binding(key="port", value="8000", original="export port=8000")]),
- ('a="b\nc"', [Binding(key="a", value="b\nc", original='a="b\nc"')]),
- ("a='b\nc'", [Binding(key="a", value="b\nc", original="a='b\nc'")]),
- ('a="b\nc"', [Binding(key="a", value="b\nc", original='a="b\nc"')]),
- ('a="b\\nc"', [Binding(key="a", value='b\nc', original='a="b\\nc"')]),
- ('a="b\\"c"', [Binding(key="a", value='b"c', original='a="b\\"c"')]),
- ("a='b\\'c'", [Binding(key="a", value="b'c", original="a='b\\'c'")]),
- ("a=à", [Binding(key="a", value="à", original="a=à")]),
- ('a="à"', [Binding(key="a", value="à", original='a="à"')]),
- ('garbage', [Binding(key=None, value=None, original="garbage")]),
- (
- "a=b\nc=d",
- [
- Binding(key="a", value="b", original="a=b\n"),
- Binding(key="c", value="d", original="c=d"),
- ],
- ),
- (
- "a=b\r\nc=d",
- [
- Binding(key="a", value="b", original="a=b\r\n"),
- Binding(key="c", value="d", original="c=d"),
- ],
- ),
- (
- 'a=\nb=c',
- [
- Binding(key="a", value='', original='a=\n'),
- Binding(key="b", value='c', original="b=c"),
- ]
- ),
- (
- 'a="\nb=c',
- [
- Binding(key="a", value='"', original='a="\n'),
- Binding(key="b", value='c', original="b=c"),
- ]
- ),
- (
- '# comment\na="b\nc"\nd=e\n',
- [
- Binding(key=None, value=None, original="# comment\n"),
- Binding(key="a", value="b\nc", original='a="b\nc"\n'),
- Binding(key="d", value="e", original="d=e\n"),
- ],
- ),
- (
- 'garbage[%$#\na=b',
- [
- Binding(key=None, value=None, original="garbage[%$#\n"),
- Binding(key="a", value="b", original='a=b'),
- ],
- ),
-])
-def test_parse_stream(test_input, expected):
- result = parse_stream(StringIO(test_input))
-
- assert list(result) == expected
-
-
def test_warns_if_file_does_not_exist():
with warnings.catch_warnings(record=True) as w:
load_dotenv('.does_not_exist', verbose=True)
diff --git a/tests/test_parser.py b/tests/test_parser.py
new file mode 100644
index 0000000..f191f90
--- /dev/null
+++ b/tests/test_parser.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+import pytest
+
+from dotenv.compat import StringIO
+from dotenv.parser import Binding, parse_stream
+
+
[email protected]("test_input,expected", [
+ (u"", []),
+ (u"a=b", [Binding(key=u"a", value=u"b", original=u"a=b")]),
+ (u"'a'=b", [Binding(key=u"a", value=u"b", original=u"'a'=b")]),
+ (u"[=b", [Binding(key=u"[", value=u"b", original=u"[=b")]),
+ (u" a = b ", [Binding(key=u"a", value=u"b", original=u" a = b ")]),
+ (u"export a=b", [Binding(key=u"a", value=u"b", original=u"export a=b")]),
+ (u" export 'a'=b", [Binding(key=u"a", value=u"b", original=u" export 'a'=b")]),
+ (u"# a=b", [Binding(key=None, value=None, original=u"# a=b")]),
+ (u"a=b#c", [Binding(key=u"a", value=u"b#c", original=u"a=b#c")]),
+ (u'a=b # comment', [Binding(key=u"a", value=u"b", original=u"a=b # comment")]),
+ (u"a=b space ", [Binding(key=u"a", value=u"b space", original=u"a=b space ")]),
+ (u"a='b space '", [Binding(key=u"a", value=u"b space ", original=u"a='b space '")]),
+ (u'a="b space "', [Binding(key=u"a", value=u"b space ", original=u'a="b space "')]),
+ (u"export export_a=1", [Binding(key=u"export_a", value=u"1", original=u"export export_a=1")]),
+ (u"export port=8000", [Binding(key=u"port", value=u"8000", original=u"export port=8000")]),
+ (u'a="b\nc"', [Binding(key=u"a", value=u"b\nc", original=u'a="b\nc"')]),
+ (u"a='b\nc'", [Binding(key=u"a", value=u"b\nc", original=u"a='b\nc'")]),
+ (u'a="b\nc"', [Binding(key=u"a", value=u"b\nc", original=u'a="b\nc"')]),
+ (u'a="b\\nc"', [Binding(key=u"a", value=u'b\nc', original=u'a="b\\nc"')]),
+ (u"a='b\\nc'", [Binding(key=u"a", value=u'b\\nc', original=u"a='b\\nc'")]),
+ (u'a="b\\"c"', [Binding(key=u"a", value=u'b"c', original=u'a="b\\"c"')]),
+ (u"a='b\\'c'", [Binding(key=u"a", value=u"b'c", original=u"a='b\\'c'")]),
+ (u"a=à", [Binding(key=u"a", value=u"à", original=u"a=à")]),
+ (u'a="à"', [Binding(key=u"a", value=u"à", original=u'a="à"')]),
+ (u'garbage', [Binding(key=None, value=None, original=u"garbage")]),
+ (
+ u"a=b\nc=d",
+ [
+ Binding(key=u"a", value=u"b", original=u"a=b\n"),
+ Binding(key=u"c", value=u"d", original=u"c=d"),
+ ],
+ ),
+ (
+ u"a=b\r\nc=d",
+ [
+ Binding(key=u"a", value=u"b", original=u"a=b\r\n"),
+ Binding(key=u"c", value=u"d", original=u"c=d"),
+ ],
+ ),
+ (
+ u'a=\nb=c',
+ [
+ Binding(key=u"a", value=u'', original=u'a=\n'),
+ Binding(key=u"b", value=u'c', original=u"b=c"),
+ ]
+ ),
+ (
+ u'a=b\n\nc=d',
+ [
+ Binding(key=u"a", value=u"b", original=u"a=b\n"),
+ Binding(key=u"c", value=u"d", original=u"\nc=d"),
+ ]
+ ),
+ (
+ u'a="\nb=c',
+ [
+ Binding(key=None, value=None, original=u'a="\n'),
+ Binding(key=u"b", value=u"c", original=u"b=c"),
+ ]
+ ),
+ (
+ u'# comment\na="b\nc"\nd=e\n',
+ [
+ Binding(key=None, value=None, original=u"# comment\n"),
+ Binding(key=u"a", value=u"b\nc", original=u'a="b\nc"\n'),
+ Binding(key=u"d", value=u"e", original=u"d=e\n"),
+ ],
+ ),
+ (
+ u'garbage[%$#\na=b',
+ [
+ Binding(key=None, value=None, original=u"garbage[%$#\n"),
+ Binding(key=u"a", value=u"b", original=u'a=b'),
+ ],
+ ),
+])
+def test_parse_stream(test_input, expected):
+ result = parse_stream(StringIO(test_input))
+
+ assert list(result) == expected
|
Breaking changes in 0.10.0 and future changes
As reported by several users in https://github.com/theskumar/python-dotenv/pull/148, version 0.10.0 changed how some .env files are parsed in a breaking way, especially with regard to escapes. I think some of those changes were necessary, but some others may have been unexpected.
## Experiment
It was unclear what exactly broke and whether Python-dotenv is consistent with other parsers, so I ran an experiment to compare versions and packages.
The scripts to generate these tables are in [bbc2/dotenv-parser-comparisons](https://github.com/bbc2/dotenv-parser-comparisons). I may update them if I find new interesting behavior.
### Basic
```
foo=ab
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `b` |
| python-dotenv-0.10.1 | `a` `b` |
| bash-5.0.0 | `a` `b` |
| js-dotenv-6.2.0 | `a` `b` |
| ruby-dotenv-2.6.0 | `a` `b` |
### Escaped `z`
```
foo=a\zb
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `\` `z` `b` |
| python-dotenv-0.10.1 | `a` `\` `z` `b` |
| bash-5.0.0 | `a` `z` `b` |
| js-dotenv-6.2.0 | `a` `\` `z` `b` |
| ruby-dotenv-2.6.0 | `a` `\` `z` `b` |
### Escaped and single-quoted `z`
```
foo='a\zb'
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `z` `b` |
| python-dotenv-0.10.1 | `a` `\` `z` `b` |
| bash-5.0.0 | `a` `\` `z` `b` |
| js-dotenv-6.2.0 | `a` `\` `z` `b` |
| ruby-dotenv-2.6.0 | `a` `\` `z` `b` |
### Escaped and double-quoted `z`
```
foo="a\zb"
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `z` `b` |
| python-dotenv-0.10.1 | `a` `\` `z` `b` |
| bash-5.0.0 | `a` `\` `z` `b` |
| js-dotenv-6.2.0 | `a` `\` `z` `b` |
| ruby-dotenv-2.6.0 | `a` `z` `b` |
### Escaped `n`
```
foo=a\nb
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `\` `n` `b` |
| python-dotenv-0.10.1 | `a` `\` `n` `b` |
| bash-5.0.0 | `a` `n` `b` |
| js-dotenv-6.2.0 | `a` `\` `n` `b` |
| ruby-dotenv-2.6.0 | `a` `\` `n` `b` |
### Escaped and single-quoted `n`
```
foo='a\nb'
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `n` `b` |
| python-dotenv-0.10.1 | `a` `\n` `b` |
| bash-5.0.0 | `a` `\` `n` `b` |
| js-dotenv-6.2.0 | `a` `\` `n` `b` |
| ruby-dotenv-2.6.0 | `a` `\` `n` `b` |
### Escaped and double-quoted `n`
```
foo="a\nb"
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `\` `n` `b` |
| python-dotenv-0.10.1 | `a` `\n` `b` |
| bash-5.0.0 | `a` `\` `n` `b` |
| js-dotenv-6.2.0 | `a` `\n` `b` |
| ruby-dotenv-2.6.0 | `a` `\n` `b` |
### Quoted newline
```
foo="a
b"
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `"` `a` |
| python-dotenv-0.10.1 | `a` `\n` `b` |
| bash-5.0.0 | `a` `\n` `b` |
| js-dotenv-6.2.0 | `a` |
| ruby-dotenv-2.6.0 | `a` `\n` `b` |
### Non-escaped space
```
foo=a b
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` ` ` `b` |
| python-dotenv-0.10.1 | `a` ` ` `b` |
| bash-5.0.0 | `a` |
| js-dotenv-6.2.0 | `a` ` ` `b` |
| ruby-dotenv-2.6.0 | `a` ` ` `b` |
### Non-escaped `#`
```
foo=a#b
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `#` `b` |
| python-dotenv-0.10.1 | `a` |
| bash-5.0.0 | `a` `#` `b` |
| js-dotenv-6.2.0 | `a` `#` `b` |
| ruby-dotenv-2.6.0 | `a` |
### Non-escaped spaced `#`
```
foo=a #b
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` ` ` `#` `b` |
| python-dotenv-0.10.1 | `a` |
| bash-5.0.0 | `a` |
| js-dotenv-6.2.0 | `a` ` ` `#` `b` |
| ruby-dotenv-2.6.0 | `a` |
### Escaped `#`
```
foo="a#b"
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `a` `#` `b` |
| python-dotenv-0.10.1 | `a` `#` `b` |
| bash-5.0.0 | `a` `#` `b` |
| js-dotenv-6.2.0 | `a` `#` `b` |
| ruby-dotenv-2.6.0 | `a` `#` `b` |
### UTF-8
```
foo=é
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `\` `x` `e` `9` |
| python-dotenv-0.10.1 | `é` |
| bash-5.0.0 | `é` |
| js-dotenv-6.2.0 | `é` |
| ruby-dotenv-2.6.0 | `é` |
### Quoted UTF-8
```
foo="é"
```
| parser | output |
|--|--|
| python-dotenv-0.9.1 | `é` |
| python-dotenv-0.10.1 | `é` |
| bash-5.0.0 | `é` |
| js-dotenv-6.2.0 | `é` |
| ruby-dotenv-2.6.0 | `é` |
## Conclusion
1. Non-quoted escapes (valid or invalid): All up-to-date parsers except Bash do the same thing. Bash removes the `\`. 0.9.1 added a `\`, which was obviously incorrect.
2. Single-quoted invalid escapes: Everything is fine.
3. Single-quoted valid escapes: 0.10.1 is the only parser that interprets them as control characters.
4. Double-quoted invalid escapes: It's fine except for Ruby.
5. Double-quoted valid escapes: All up-to-date parsers except Bash do the same thing. Bash and 0.9.1 keep the original characters instead of interpreting them as control characters.
6. Pound sign `#`: Interpreted as a comment delimiter since 0.10.0 if unquoted, even if there is no whitespace preceding it. When quoted or prefixed with whitespace, everything is fine except for JavaScript.
7. Non-quoted UTF-8: Fixed in 0.10.0. When quoted, everything is fine.
8. Non-escaped space: Only Bash ignores everything after it (or treats the rest as a command). Other parsers include everything until the end of the line.
My opinion:
* (2, 4, 7) are OK.
* (1, 5, 8) are where Bash differs from other parsers. It isn't obvious what we should do.
* (3, 6) are where python-dotenv 0.10.0 is quite obviously broken and should be fixed.
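
A quick check of points (3) and (5) against the parser added in `src/dotenv/parser.py` above (a sketch, assuming that module layout):

```python
from io import StringIO

from dotenv.parser import parse_stream

# Double quotes: the \n escape is decoded into a control character.
double, = parse_stream(StringIO('a="b\\nc"'))
print(repr(double.value))  # 'b\nc'

# Single quotes: the \n escape stays literal, matching Bash/JS/Ruby.
single, = parse_stream(StringIO("a='b\\nc'"))
print(repr(single.value))  # 'b\\nc'
```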
|
0.0
|
73124de45fcc21010ed9e90d4e5f576ba018496b
|
[
"tests/test_core.py::test_warns_if_file_does_not_exist",
"tests/test_core.py::test_find_dotenv",
"tests/test_core.py::test_load_dotenv",
"tests/test_core.py::test_load_dotenv_override",
"tests/test_core.py::test_load_dotenv_in_current_dir",
"tests/test_core.py::test_ipython",
"tests/test_core.py::test_ipython_override",
"tests/test_core.py::test_dotenv_values_stream",
"tests/test_core.py::test_dotenv_values_export",
"tests/test_core.py::test_dotenv_values_utf_8",
"tests/test_core.py::test_dotenv_empty_selfreferential_interpolation",
"tests/test_core.py::test_dotenv_nonempty_selfreferential_interpolation",
"tests/test_parser.py::test_parse_stream[-expected0]",
"tests/test_parser.py::test_parse_stream[a=b-expected1]",
"tests/test_parser.py::test_parse_stream['a'=b-expected2]",
"tests/test_parser.py::test_parse_stream[[=b-expected3]",
"tests/test_parser.py::test_parse_stream[",
"tests/test_parser.py::test_parse_stream[export",
"tests/test_parser.py::test_parse_stream[#",
"tests/test_parser.py::test_parse_stream[a=b#c-expected8]",
"tests/test_parser.py::test_parse_stream[a=b",
"tests/test_parser.py::test_parse_stream[a='b",
"tests/test_parser.py::test_parse_stream[a=\"b",
"tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected15]",
"tests/test_parser.py::test_parse_stream[a='b\\nc'-expected16]",
"tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected17]",
"tests/test_parser.py::test_parse_stream[a=\"b\\\\nc\"-expected18]",
"tests/test_parser.py::test_parse_stream[a='b\\\\nc'-expected19]",
"tests/test_parser.py::test_parse_stream[a=\"b\\\\\"c\"-expected20]",
"tests/test_parser.py::test_parse_stream[a='b\\\\'c'-expected21]",
"tests/test_parser.py::test_parse_stream[a=\\xe0-expected22]",
"tests/test_parser.py::test_parse_stream[a=\"\\xe0\"-expected23]",
"tests/test_parser.py::test_parse_stream[garbage-expected24]",
"tests/test_parser.py::test_parse_stream[a=b\\nc=d-expected25]",
"tests/test_parser.py::test_parse_stream[a=b\\r\\nc=d-expected26]",
"tests/test_parser.py::test_parse_stream[a=\\nb=c-expected27]",
"tests/test_parser.py::test_parse_stream[a=b\\n\\nc=d-expected28]",
"tests/test_parser.py::test_parse_stream[a=\"\\nb=c-expected29]",
"tests/test_parser.py::test_parse_stream[garbage[%$#\\na=b-expected31]"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-21 21:56:18+00:00
|
bsd-3-clause
| 5,882 |
|
theskumar__python-dotenv-236
|
diff --git a/.travis.yml b/.travis.yml
index 4b1f886..b26433a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,6 @@
language: python
cache: pip
os: linux
-dist: xenial
jobs:
include:
@@ -21,10 +20,8 @@ jobs:
env: TOXENV=py38
- python: "pypy"
env: TOXENV=pypy
- dist: trusty
- python: "pypy3"
env: TOXENV=pypy3
- dist: trusty
install:
- pip install tox
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 00ce4dc..b693ba7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,10 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
- Use current working directory to find `.env` when bundled by PyInstaller (#213 by
[@gergelyk]).
+### Fixed
+
+- Fix escaping of quoted values written by `set_key` (#236 by [@bbc2]).
+
## [0.11.0] - 2020-02-07
### Added
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index ce83155..93d617d 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -153,8 +153,11 @@ def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"):
if " " in value_to_set:
quote_mode = "always"
- line_template = '{}="{}"\n' if quote_mode == "always" else '{}={}\n'
- line_out = line_template.format(key_to_set, value_to_set)
+ if quote_mode == "always":
+ value_out = '"{}"'.format(value_to_set.replace('"', '\\"'))
+ else:
+ value_out = value_to_set
+ line_out = "{}={}\n".format(key_to_set, value_out)
with rewrite(dotenv_path) as (source, dest):
replaced = False
|
theskumar/python-dotenv
|
ba5a16e0e349a6a1f26927c5f833a95c4c645647
|
diff --git a/tests/test_main.py b/tests/test_main.py
index 3416e2c..a4fb5b4 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -29,14 +29,25 @@ def test_set_key_no_file(tmp_path):
)
-def test_set_key_new(dotenv_file):
[email protected](
+ "key,value,expected,content",
+ [
+ ("a", "", (True, "a", ""), 'a=""\n'),
+ ("a", "b", (True, "a", "b"), 'a="b"\n'),
+ ("a", "'b'", (True, "a", "b"), 'a="b"\n'),
+ ("a", "\"b\"", (True, "a", "b"), 'a="b"\n'),
+ ("a", "b'c", (True, "a", "b'c"), 'a="b\'c"\n'),
+ ("a", "b\"c", (True, "a", "b\"c"), 'a="b\\\"c"\n'),
+ ],
+)
+def test_set_key_new(dotenv_file, key, value, expected, content):
logger = logging.getLogger("dotenv.main")
with mock.patch.object(logger, "warning") as mock_warning:
- result = dotenv.set_key(dotenv_file, "foo", "bar")
+ result = dotenv.set_key(dotenv_file, key, value)
- assert result == (True, "foo", "bar")
- assert open(dotenv_file, "r").read() == 'foo="bar"\n'
+ assert result == expected
+ assert open(dotenv_file, "r").read() == content
mock_warning.assert_not_called()
|
set_key doesn't escape values with quotes in them
Suppose you have this code:
```python
import dotenv
import json
key = "KEY_TO_SAVE"
value = json.dumps({"test": "me"})
dotenv.set_key(".env", key, value)
```
What I would expect to see is a `.env` file that looks like this:
```
KEY_TO_SAVE="{\"test\": \"me\"}"
```
However what I actually see is this:
```
KEY_TO_SAVE="{"test": "me"}"
```
Because the quote character is not escaped, dotenv has effectively changed the value I was trying to set. In this case, the value passed to dotenv is valid JSON, but the value dotenv saves to the file is not valid JSON.
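For illustration, a minimal sketch of the escaping I'd expect (`quote_value` is a hypothetical helper, not part of dotenv's API):
```python
def quote_value(value):
    # Escape embedded double quotes before wrapping the value in double quotes.
    return '"{}"'.format(value.replace('"', '\\"'))

assert quote_value('{"test": "me"}') == '"{\\"test\\": \\"me\\"}"'
```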
|
0.0
|
ba5a16e0e349a6a1f26927c5f833a95c4c645647
|
[
"tests/test_main.py::test_set_key_new[a-b\"c-expected5-a=\"b\\\\\"c\"\\n]"
] |
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key_new[a--expected0-a=\"\"\\n]",
"tests/test_main.py::test_set_key_new[a-b-expected1-a=\"b\"\\n]",
"tests/test_main.py::test_set_key_new[a-'b'-expected2-a=\"b\"\\n]",
"tests/test_main.py::test_set_key_new[a-\"b\"-expected3-a=\"b\"\\n]",
"tests/test_main.py::test_set_key_new[a-b'c-expected4-a=\"b'c\"\\n]",
"tests/test_main.py::test_set_key_new_with_other_values",
"tests/test_main.py::test_set_key_existing",
"tests/test_main.py::test_set_key_existing_with_other_values",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_no_file_verbose",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_utf_8",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_stream[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_stream[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_stream[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_stream[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_stream[env4-b=c\\na=${b}-True-expected4]",
"tests/test_main.py::test_dotenv_values_stream[env5-a=${b}-True-expected5]",
"tests/test_main.py::test_dotenv_values_stream[env6-a=\"${b}\"-True-expected6]",
"tests/test_main.py::test_dotenv_values_stream[env7-a='${b}'-True-expected7]",
"tests/test_main.py::test_dotenv_values_stream[env8-a=${a}-True-expected8]",
"tests/test_main.py::test_dotenv_values_stream[env9-a=${a}-True-expected9]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-12 22:37:50+00:00
|
bsd-3-clause
| 5,883 |
|
theskumar__python-dotenv-238
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 47163f3..8566a50 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
- Fix escaping of quoted values written by `set_key` (#236 by [@bbc2]).
- Fix `dotenv run` crashing on environment variables without values (#237 by [@yannham]).
+- Remove warning when last line is empty (#238 by [@bbc2]).
## [0.11.0] - 2020-02-07
diff --git a/src/dotenv/parser.py b/src/dotenv/parser.py
index 2904af8..2c93cbd 100644
--- a/src/dotenv/parser.py
+++ b/src/dotenv/parser.py
@@ -197,6 +197,13 @@ def parse_binding(reader):
reader.set_mark()
try:
reader.read_regex(_multiline_whitespace)
+ if not reader.has_next():
+ return Binding(
+ key=None,
+ value=None,
+ original=reader.get_marked(),
+ error=False,
+ )
reader.read_regex(_export)
key = parse_key(reader)
reader.read_regex(_whitespace)
|
theskumar/python-dotenv
|
2f58bccad26e0b728d32ec9bf9493671212dc24f
|
diff --git a/tests/test_main.py b/tests/test_main.py
index a4fb5b4..d867858 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -30,63 +30,32 @@ def test_set_key_no_file(tmp_path):
@pytest.mark.parametrize(
- "key,value,expected,content",
+ "before,key,value,expected,after",
[
- ("a", "", (True, "a", ""), 'a=""\n'),
- ("a", "b", (True, "a", "b"), 'a="b"\n'),
- ("a", "'b'", (True, "a", "b"), 'a="b"\n'),
- ("a", "\"b\"", (True, "a", "b"), 'a="b"\n'),
- ("a", "b'c", (True, "a", "b'c"), 'a="b\'c"\n'),
- ("a", "b\"c", (True, "a", "b\"c"), 'a="b\\\"c"\n'),
+ ("", "a", "", (True, "a", ""), 'a=""\n'),
+ ("", "a", "b", (True, "a", "b"), 'a="b"\n'),
+ ("", "a", "'b'", (True, "a", "b"), 'a="b"\n'),
+ ("", "a", "\"b\"", (True, "a", "b"), 'a="b"\n'),
+ ("", "a", "b'c", (True, "a", "b'c"), 'a="b\'c"\n'),
+ ("", "a", "b\"c", (True, "a", "b\"c"), 'a="b\\\"c"\n'),
+ ("a=b", "a", "c", (True, "a", "c"), 'a="c"\n'),
+ ("a=b\n", "a", "c", (True, "a", "c"), 'a="c"\n'),
+ ("a=b\n\n", "a", "c", (True, "a", "c"), 'a="c"\n\n'),
+ ("a=b\nc=d", "a", "e", (True, "a", "e"), 'a="e"\nc=d'),
+ ("a=b\nc=d\ne=f", "c", "g", (True, "c", "g"), 'a=b\nc="g"\ne=f'),
+ ("a=b\n", "c", "d", (True, "c", "d"), 'a=b\nc="d"\n'),
],
)
-def test_set_key_new(dotenv_file, key, value, expected, content):
+def test_set_key(dotenv_file, before, key, value, expected, after):
logger = logging.getLogger("dotenv.main")
+ with open(dotenv_file, "w") as f:
+ f.write(before)
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.set_key(dotenv_file, key, value)
assert result == expected
- assert open(dotenv_file, "r").read() == content
- mock_warning.assert_not_called()
-
-
-def test_set_key_new_with_other_values(dotenv_file):
- logger = logging.getLogger("dotenv.main")
- with open(dotenv_file, "w") as f:
- f.write("a=b\n")
-
- with mock.patch.object(logger, "warning") as mock_warning:
- result = dotenv.set_key(dotenv_file, "foo", "bar")
-
- assert result == (True, "foo", "bar")
- assert open(dotenv_file, "r").read() == 'a=b\nfoo="bar"\n'
- mock_warning.assert_not_called()
-
-
-def test_set_key_existing(dotenv_file):
- logger = logging.getLogger("dotenv.main")
- with open(dotenv_file, "w") as f:
- f.write("foo=bar")
-
- with mock.patch.object(logger, "warning") as mock_warning:
- result = dotenv.set_key(dotenv_file, "foo", "baz")
-
- assert result == (True, "foo", "baz")
- assert open(dotenv_file, "r").read() == 'foo="baz"\n'
- mock_warning.assert_not_called()
-
-
-def test_set_key_existing_with_other_values(dotenv_file):
- logger = logging.getLogger("dotenv.main")
- with open(dotenv_file, "w") as f:
- f.write("a=b\nfoo=bar\nc=d")
-
- with mock.patch.object(logger, "warning") as mock_warning:
- result = dotenv.set_key(dotenv_file, "foo", "baz")
-
- assert result == (True, "foo", "baz")
- assert open(dotenv_file, "r").read() == 'a=b\nfoo="baz"\nc=d'
+ assert open(dotenv_file, "r").read() == after
mock_warning.assert_not_called()
diff --git a/tests/test_parser.py b/tests/test_parser.py
index dae51d3..f807513 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -86,6 +86,19 @@ from dotenv.parser import Binding, Original, parse_stream
Binding(key=u"b", value=u'c', original=Original(string=u"b=c", line=2), error=False),
]
),
+ (
+ u"\n\n",
+ [
+ Binding(key=None, value=None, original=Original(string=u"\n\n", line=1), error=False),
+ ]
+ ),
+ (
+ u"a=b\n\n",
+ [
+ Binding(key=u"a", value=u"b", original=Original(string=u"a=b\n", line=1), error=False),
+ Binding(key=None, value=None, original=Original(string=u"\n", line=2), error=False),
+ ]
+ ),
(
u'a=b\n\nc=d',
[
|
Python-dotenv could not parse statement
Hi guys,
I use dotenv in my Django app and ran into an issue when I run `runserver`:
`> Python-dotenv could not parse statement starting at line 10`
Django seems to work fine regardless, but the warning doesn't feel right.
Here is my .env file:
```
DJANGO_SECRET_KEY="xxxxxxxxxxxxxxxxx"
DEBUG=True
DATABASE_DEFAULT_ENGINE="django.db.backends.postgresql"
DATABASE_DEFAULT_NAME="postgres"
DATABASE_DEFAULT_USER="postgres"
DATABASE_DEFAULT_PASSWORD="abc123"
DATABASE_DEFAULT_HOST="localhost"
DATABASE_DEFAULT_PORT="5432"
SENDGRID_API_KEY="xxxxxxxxxxxxxxxxxxxxxxxx"
```
I load the dotenv file in `wsgi.py`:
```
import os
from dotenv import load_dotenv
from django.core.wsgi import get_wsgi_application
load_dotenv()
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webapp.settings')
application = get_wsgi_application()
```
and I even did it in `manage.py`:
```
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
env_path = Path('webapp') / '.env'
load_dotenv(dotenv_path=env_path)
... the rest as usual
```
Is this a bug, or a mistake on my end?
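For reference, the warning points at line 10, one past the last binding, which suggests the trailing blank line is what triggers it. A hypothetical minimal reproduction:
```python
import io
from dotenv import dotenv_values

# The stream ends with a blank line, like a file whose last binding
# is followed by an extra newline.
dotenv_values(stream=io.StringIO("SENDGRID_API_KEY=x\n\n"))
# before the fix, logs something like:
# Python-dotenv could not parse statement starting at line 2
```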
|
0.0
|
2f58bccad26e0b728d32ec9bf9493671212dc24f
|
[
"tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a=\"c\"\\n\\n]",
"tests/test_parser.py::test_parse_stream[\\n\\n-expected30]",
"tests/test_parser.py::test_parse_stream[a=b\\n\\n-expected31]"
] |
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key[-a--expected0-a=\"\"\\n]",
"tests/test_main.py::test_set_key[-a-b-expected1-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-'b'-expected2-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-\"b\"-expected3-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-b'c-expected4-a=\"b'c\"\\n]",
"tests/test_main.py::test_set_key[-a-b\"c-expected5-a=\"b\\\\\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b-a-c-expected6-a=\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a=\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a=\"e\"\\nc=d]",
"tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc=\"g\"\\ne=f]",
"tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc=\"d\"\\n]",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_no_file_verbose",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_utf_8",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_stream[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_stream[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_stream[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_stream[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_stream[env4-b=c\\na=${b}-True-expected4]",
"tests/test_main.py::test_dotenv_values_stream[env5-a=${b}-True-expected5]",
"tests/test_main.py::test_dotenv_values_stream[env6-a=\"${b}\"-True-expected6]",
"tests/test_main.py::test_dotenv_values_stream[env7-a='${b}'-True-expected7]",
"tests/test_main.py::test_dotenv_values_stream[env8-a=${a}-True-expected8]",
"tests/test_main.py::test_dotenv_values_stream[env9-a=${a}-True-expected9]",
"tests/test_parser.py::test_parse_stream[-expected0]",
"tests/test_parser.py::test_parse_stream[a=b-expected1]",
"tests/test_parser.py::test_parse_stream['a'=b-expected2]",
"tests/test_parser.py::test_parse_stream[[=b-expected3]",
"tests/test_parser.py::test_parse_stream[",
"tests/test_parser.py::test_parse_stream[export",
"tests/test_parser.py::test_parse_stream[#",
"tests/test_parser.py::test_parse_stream[a=b#c-expected8]",
"tests/test_parser.py::test_parse_stream[a=b",
"tests/test_parser.py::test_parse_stream[a='b",
"tests/test_parser.py::test_parse_stream[a=\"b",
"tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected15]",
"tests/test_parser.py::test_parse_stream[a='b\\nc'-expected16]",
"tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected17]",
"tests/test_parser.py::test_parse_stream[a=\"b\\\\nc\"-expected18]",
"tests/test_parser.py::test_parse_stream[a='b\\\\nc'-expected19]",
"tests/test_parser.py::test_parse_stream[a=\"b\\\\\"c\"-expected20]",
"tests/test_parser.py::test_parse_stream[a='b\\\\'c'-expected21]",
"tests/test_parser.py::test_parse_stream[a=\\xe0-expected22]",
"tests/test_parser.py::test_parse_stream[a=\"\\xe0\"-expected23]",
"tests/test_parser.py::test_parse_stream[no_value_var-expected24]",
"tests/test_parser.py::test_parse_stream[a:",
"tests/test_parser.py::test_parse_stream[a=b\\nc=d-expected26]",
"tests/test_parser.py::test_parse_stream[a=b\\rc=d-expected27]",
"tests/test_parser.py::test_parse_stream[a=b\\r\\nc=d-expected28]",
"tests/test_parser.py::test_parse_stream[a=\\nb=c-expected29]",
"tests/test_parser.py::test_parse_stream[a=b\\n\\nc=d-expected32]",
"tests/test_parser.py::test_parse_stream[a=\"\\nb=c-expected33]",
"tests/test_parser.py::test_parse_stream[a=b\\n#",
"tests/test_parser.py::test_parse_stream[uglyKey[%$=\"S3cr3t_P4ssw#rD\""
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-21 22:46:26+00:00
|
bsd-3-clause
| 5,884 |
|
theskumar__python-dotenv-260
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 116d97f..a01d3dc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,7 +7,9 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
-*No unreleased change at this time.*
+### Fixed
+
+- Fix potentially empty expanded value for duplicate key (#260 by [@bbc]).
## [0.14.0] - 2020-07-03
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index 8f77e83..607299a 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -18,7 +18,7 @@ logger = logging.getLogger(__name__)
if IS_TYPE_CHECKING:
from typing import (
- Dict, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple
+ Dict, Iterable, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple
)
if sys.version_info >= (3, 6):
_PathLike = os.PathLike
@@ -83,9 +83,13 @@ class DotEnv():
if self._dict:
return self._dict
- values = OrderedDict(self.parse())
- self._dict = resolve_nested_variables(values) if self.interpolate else values
- return self._dict
+ if self.interpolate:
+ values = resolve_nested_variables(self.parse())
+ else:
+ values = OrderedDict(self.parse())
+
+ self._dict = values
+ return values
def parse(self):
# type: () -> Iterator[Tuple[Text, Optional[Text]]]
@@ -211,7 +215,7 @@ def unset_key(dotenv_path, key_to_unset, quote_mode="always"):
def resolve_nested_variables(values):
- # type: (Dict[Text, Optional[Text]]) -> Dict[Text, Optional[Text]]
+ # type: (Iterable[Tuple[Text, Optional[Text]]]) -> Dict[Text, Optional[Text]]
def _replacement(name, default):
# type: (Text, Optional[Text]) -> Text
default = default if default is not None else ""
@@ -229,7 +233,7 @@ def resolve_nested_variables(values):
new_values = {}
- for k, v in values.items():
+ for (k, v) in values:
new_values[k] = __posix_variable.sub(_re_sub_callback, v) if v is not None else None
return new_values
|
theskumar/python-dotenv
|
e4bbb8a2aa881409af6fb92933c18e2af6609da8
|
diff --git a/tests/test_main.py b/tests/test_main.py
index 3a3d059..339d00b 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -337,6 +337,8 @@ def test_dotenv_values_file(dotenv_file):
# Re-defined and used in file
({"b": "c"}, "b=d\na=${b}", True, {"a": "d", "b": "d"}),
+ ({}, "a=b\na=c\nd=${a}", True, {"a": "c", "d": "c"}),
+ ({}, "a=b\nc=${a}\nd=e\nc=${d}", True, {"a": "b", "c": "e", "d": "e"}),
],
)
def test_dotenv_values_stream(env, string, interpolate, expected):
|
Unexpected Behavior when Parsing Duplicate Key
Hello. I started using this package yesterday and I am sorry if I am missing something simple.
I am trying to read an env file that looks like this:
```
export hello=hi
export greetings=${hello}
export goodbye=bye
export greetings=${goodbye}
```
To read it, I wrote the following small program:
```python
import os
import dotenv
from pathlib import Path
env_path = './test.env'
dotenv.load_dotenv(dotenv_path=env_path,verbose=True)
print(os.getenv('greetings'))
```
When I run this program, it outputs a single empty line. This happens even when I set override to True. I was expecting the program to output 'bye'. Is this a bug or the expected behavior for handling duplicate keys? I am currently running Python 3.7.3 and python-dotenv 0.10.3. Thank you.
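A sketch of what seems to be going on, inferred from the fix above (the dict contents are my assumption, not verified output):
```python
pairs = [("hello", "hi"), ("greetings", "${hello}"),
         ("goodbye", "bye"), ("greetings", "${goodbye}")]
# Collapsing into a dict *before* expansion keeps the first position of
# 'greetings' with its last value, so it now precedes 'goodbye':
collapsed = dict(pairs)  # {'hello': 'hi', 'greetings': '${goodbye}', 'goodbye': 'bye'}
# Expanding in that order looks up 'goodbye' before it is defined,
# falling back to the (empty) environment -- hence the empty output.
```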
|
0.0
|
e4bbb8a2aa881409af6fb92933c18e2af6609da8
|
[
"tests/test_main.py::test_dotenv_values_stream[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]"
] |
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key[-a--expected0-a=\"\"\\n]",
"tests/test_main.py::test_set_key[-a-b-expected1-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-'b'-expected2-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-\"b\"-expected3-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-b'c-expected4-a=\"b'c\"\\n]",
"tests/test_main.py::test_set_key[-a-b\"c-expected5-a=\"b\\\\\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b-a-c-expected6-a=\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a=\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a=\"c\"\\n\\n]",
"tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a=\"e\"\\nc=d]",
"tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc=\"g\"\\ne=f]",
"tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc=\"d\"\\n]",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_no_file_verbose",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_utf_8",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_stream[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_stream[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_stream[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_stream[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_stream[env4-a=${b:-d}-False-expected4]",
"tests/test_main.py::test_dotenv_values_stream[env5-a=${b:-d}-True-expected5]",
"tests/test_main.py::test_dotenv_values_stream[env6-b=c\\na=${b}-True-expected6]",
"tests/test_main.py::test_dotenv_values_stream[env7-a=${b}-True-expected7]",
"tests/test_main.py::test_dotenv_values_stream[env8-a=${b:-d}-True-expected8]",
"tests/test_main.py::test_dotenv_values_stream[env9-a=\"${b}\"-True-expected9]",
"tests/test_main.py::test_dotenv_values_stream[env10-a='${b}'-True-expected10]",
"tests/test_main.py::test_dotenv_values_stream[env11-a=x${b}y-True-expected11]",
"tests/test_main.py::test_dotenv_values_stream[env12-a=${a}-True-expected12]",
"tests/test_main.py::test_dotenv_values_stream[env13-a=${a}-True-expected13]",
"tests/test_main.py::test_dotenv_values_stream[env14-a=${a:-c}-True-expected14]",
"tests/test_main.py::test_dotenv_values_stream[env15-a=${a:-c}-True-expected15]",
"tests/test_main.py::test_dotenv_values_stream[env16-a=${b}${b}-True-expected16]",
"tests/test_main.py::test_dotenv_values_stream[env17-b=d\\na=${b}-True-expected17]",
"tests/test_main.py::test_dotenv_values_stream[env18-a=b\\na=c\\nd=${a}-True-expected18]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-03 16:57:54+00:00
|
bsd-3-clause
| 5,885 |
|
theskumar__python-dotenv-277
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 34cdb32..b5305f1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,8 +19,10 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
### Fixed
-- Fix potentially empty expanded value for duplicate key (#260 by [@bbc]).
+- Fix potentially empty expanded value for duplicate key (#260 by [@bbc2]).
- Fix import error on Python 3.5.0 and 3.5.1 (#267 by [@gongqingkui]).
+- Fix parsing of unquoted values containing several adjacent space or tab characters
+ (#277 by [@bbc2], review by [@x-yuri]).
## [0.14.0] - 2020-07-03
@@ -226,6 +228,7 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
[@theskumar]: https://github.com/theskumar
[@ulyssessouza]: https://github.com/ulyssessouza
[@venthur]: https://github.com/venthur
+[@x-yuri]: https://github.com/x-yuri
[@yannham]: https://github.com/yannham
[Unreleased]: https://github.com/theskumar/python-dotenv/compare/v0.14.0...HEAD
diff --git a/src/dotenv/parser.py b/src/dotenv/parser.py
index 4eba0ac..5cb1cdf 100644
--- a/src/dotenv/parser.py
+++ b/src/dotenv/parser.py
@@ -24,7 +24,7 @@ _unquoted_key = make_regex(r"([^=\#\s]+)")
_equal_sign = make_regex(r"(=[^\S\r\n]*)")
_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'")
_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"')
-_unquoted_value_part = make_regex(r"([^ \r\n]*)")
+_unquoted_value = make_regex(r"([^\r\n]*)")
_comment = make_regex(r"(?:[^\S\r\n]*#[^\r\n]*)?")
_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r|$)")
_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?")
@@ -167,14 +167,8 @@ def parse_key(reader):
def parse_unquoted_value(reader):
# type: (Reader) -> Text
- value = u""
- while True:
- (part,) = reader.read_regex(_unquoted_value_part)
- value += part
- after = reader.peek(2)
- if len(after) < 2 or after[0] in u"\r\n" or after[1] in u" #\r\n":
- return value
- value += reader.read(2)
+ (part,) = reader.read_regex(_unquoted_value)
+ return re.sub(r"\s+#.*", "", part).rstrip()
def parse_value(reader):
|
theskumar/python-dotenv
|
6ca2e2ab399c7e41be276bc830f21af3092f5d28
|
diff --git a/tests/test_parser.py b/tests/test_parser.py
index f807513..48cecdc 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -19,20 +19,40 @@ from dotenv.parser import Binding, Original, parse_stream
(u"# a=b", [Binding(key=None, value=None, original=Original(string=u"# a=b", line=1), error=False)]),
(u"a=b#c", [Binding(key=u"a", value=u"b#c", original=Original(string=u"a=b#c", line=1), error=False)]),
(
- u'a=b # comment',
- [Binding(key=u"a", value=u"b", original=Original(string=u"a=b # comment", line=1), error=False)],
+ u'a=b #c',
+ [Binding(key=u"a", value=u"b", original=Original(string=u"a=b #c", line=1), error=False)],
),
(
- u"a=b space ",
- [Binding(key=u"a", value=u"b space", original=Original(string=u"a=b space ", line=1), error=False)],
+ u'a=b\t#c',
+ [Binding(key=u"a", value=u"b", original=Original(string=u"a=b\t#c", line=1), error=False)],
),
(
- u"a='b space '",
- [Binding(key=u"a", value=u"b space ", original=Original(string=u"a='b space '", line=1), error=False)],
+ u"a=b c",
+ [Binding(key=u"a", value=u"b c", original=Original(string=u"a=b c", line=1), error=False)],
),
(
- u'a="b space "',
- [Binding(key=u"a", value=u"b space ", original=Original(string=u'a="b space "', line=1), error=False)],
+ u"a=b\tc",
+ [Binding(key=u"a", value=u"b\tc", original=Original(string=u"a=b\tc", line=1), error=False)],
+ ),
+ (
+ u"a=b c",
+ [Binding(key=u"a", value=u"b c", original=Original(string=u"a=b c", line=1), error=False)],
+ ),
+ (
+ u"a=b\u00a0 c",
+ [Binding(key=u"a", value=u"b\u00a0 c", original=Original(string=u"a=b\u00a0 c", line=1), error=False)],
+ ),
+ (
+ u"a=b c ",
+ [Binding(key=u"a", value=u"b c", original=Original(string=u"a=b c ", line=1), error=False)],
+ ),
+ (
+ u"a='b c '",
+ [Binding(key=u"a", value=u"b c ", original=Original(string=u"a='b c '", line=1), error=False)],
+ ),
+ (
+ u'a="b c "',
+ [Binding(key=u"a", value=u"b c ", original=Original(string=u'a="b c "', line=1), error=False)],
),
(
u"export export_a=1",
|
Parsing rules are somewhat unexpected
`A=1 2` (one space) works, but `A=1  2` (two spaces) produces an error. I've run into it with `docker-compose`:
`.env`:
```yaml
A=1  2
```
`docker-compose.yml`:
```yaml
version: '3'
services:
app:
image: alpine:3.12
command: /app/1.sh
env_file: .env
volumes:
- .:/app
```
`1.sh`:
```sh
#!/bin/sh -eux
set | grep ^A
```
```
$ docker-compose up -d && docker-compose logs -f
WARNING: Python-dotenv could not parse statement starting at line 1
WARNING: Python-dotenv could not parse statement starting at line 1
WARNING: Python-dotenv could not parse statement starting at line 1
WARNING: Python-dotenv could not parse statement starting at line 1
Creating network "overriding-vars_default" with the default driver
Creating overriding-vars_app_1 ... done
WARNING: Python-dotenv could not parse statement starting at line 1
WARNING: Python-dotenv could not parse statement starting at line 1
Attaching to overriding-vars_app_1
app_1 | + set
app_1 | + grep ^A
overriding-vars_app_1 exited with code 1
```
With `A=1 2`:
```
$ docker-compose up -d && docker-compose logs -f
Recreating overriding-vars_app_1 ... done
Attaching to overriding-vars_app_1
app_1 | + set
app_1 | + grep ^A
app_1 | A='1 2'
overriding-vars_app_1 exited with code 0
```
I'll provide a Python test case, if need be.
https://github.com/theskumar/python-dotenv/blob/v0.14.0/src/dotenv/parser.py#L175-L176
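In the meantime, here is that test case as a sketch (assuming `dotenv_values` accepts a `stream`, as it does in 0.14):
```python
import io
from dotenv import dotenv_values

print(dotenv_values(stream=io.StringIO("A=1 2")))   # {'A': '1 2'}
print(dotenv_values(stream=io.StringIO("A=1  2")))  # warning, {} -- before the fix
```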
|
0.0
|
6ca2e2ab399c7e41be276bc830f21af3092f5d28
|
[
"tests/test_parser.py::test_parse_stream[a=b",
"tests/test_parser.py::test_parse_stream[a=b\\t#c-expected10]"
] |
[
"tests/test_parser.py::test_parse_stream[-expected0]",
"tests/test_parser.py::test_parse_stream[a=b-expected1]",
"tests/test_parser.py::test_parse_stream['a'=b-expected2]",
"tests/test_parser.py::test_parse_stream[[=b-expected3]",
"tests/test_parser.py::test_parse_stream[",
"tests/test_parser.py::test_parse_stream[export",
"tests/test_parser.py::test_parse_stream[#",
"tests/test_parser.py::test_parse_stream[a=b#c-expected8]",
"tests/test_parser.py::test_parse_stream[a=b\\tc-expected12]",
"tests/test_parser.py::test_parse_stream[a=b\\xa0",
"tests/test_parser.py::test_parse_stream[a='b",
"tests/test_parser.py::test_parse_stream[a=\"b",
"tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected20]",
"tests/test_parser.py::test_parse_stream[a='b\\nc'-expected21]",
"tests/test_parser.py::test_parse_stream[a=\"b\\nc\"-expected22]",
"tests/test_parser.py::test_parse_stream[a=\"b\\\\nc\"-expected23]",
"tests/test_parser.py::test_parse_stream[a='b\\\\nc'-expected24]",
"tests/test_parser.py::test_parse_stream[a=\"b\\\\\"c\"-expected25]",
"tests/test_parser.py::test_parse_stream[a='b\\\\'c'-expected26]",
"tests/test_parser.py::test_parse_stream[a=\\xe0-expected27]",
"tests/test_parser.py::test_parse_stream[a=\"\\xe0\"-expected28]",
"tests/test_parser.py::test_parse_stream[no_value_var-expected29]",
"tests/test_parser.py::test_parse_stream[a:",
"tests/test_parser.py::test_parse_stream[a=b\\nc=d-expected31]",
"tests/test_parser.py::test_parse_stream[a=b\\rc=d-expected32]",
"tests/test_parser.py::test_parse_stream[a=b\\r\\nc=d-expected33]",
"tests/test_parser.py::test_parse_stream[a=\\nb=c-expected34]",
"tests/test_parser.py::test_parse_stream[\\n\\n-expected35]",
"tests/test_parser.py::test_parse_stream[a=b\\n\\n-expected36]",
"tests/test_parser.py::test_parse_stream[a=b\\n\\nc=d-expected37]",
"tests/test_parser.py::test_parse_stream[a=\"\\nb=c-expected38]",
"tests/test_parser.py::test_parse_stream[a=b\\n#",
"tests/test_parser.py::test_parse_stream[uglyKey[%$=\"S3cr3t_P4ssw#rD\""
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-09 09:06:18+00:00
|
bsd-3-clause
| 5,886 |
|
theskumar__python-dotenv-287
|
diff --git a/.travis.yml b/.travis.yml
index 8f51de3..8ccd240 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -33,6 +33,7 @@ script:
- tox
before_install:
+ - pip install --upgrade pip
- pip install coveralls
after_success:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 56a7a94..effa251 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,7 +7,7 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
-_There are no unreleased changes at this time._
+- Fix resolution order in variable expansion with `override=False` (#? by [@bbc2]).
## [0.15.0] - 2020-10-28
diff --git a/README.md b/README.md
index 5c9aeaf..36f3b2b 100644
--- a/README.md
+++ b/README.md
@@ -41,13 +41,22 @@ export SECRET_KEY=YOURSECRETKEYGOESHERE
Python-dotenv can interpolate variables using POSIX variable expansion.
-The value of a variable is the first of the values defined in the following list:
+With `load_dotenv(override=True)` or `dotenv_values()`, the value of a variable is the
+first of the values defined in the following list:
- Value of that variable in the `.env` file.
- Value of that variable in the environment.
- Default value, if provided.
- Empty string.
+With `load_dotenv(override=False)`, the value of a variable is the first of the values
+defined in the following list:
+
+- Value of that variable in the environment.
+- Value of that variable in the `.env` file.
+- Default value, if provided.
+- Empty string.
+
Ensure that variables are surrounded with `{}` like `${HOME}` as bare
variables such as `$HOME` are not expanded.
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index 58a23f3..b366b18 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -4,7 +4,6 @@ from __future__ import absolute_import, print_function, unicode_literals
import io
import logging
import os
-import re
import shutil
import sys
import tempfile
@@ -13,13 +12,13 @@ from contextlib import contextmanager
from .compat import IS_TYPE_CHECKING, PY2, StringIO, to_env
from .parser import Binding, parse_stream
+from .variables import parse_variables
logger = logging.getLogger(__name__)
if IS_TYPE_CHECKING:
- from typing import (
- Dict, Iterable, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple
- )
+ from typing import (IO, Dict, Iterable, Iterator, Mapping, Optional, Text,
+ Tuple, Union)
if sys.version_info >= (3, 6):
_PathLike = os.PathLike
else:
@@ -30,18 +29,6 @@ if IS_TYPE_CHECKING:
else:
_StringIO = StringIO[Text]
-__posix_variable = re.compile(
- r"""
- \$\{
- (?P<name>[^\}:]*)
- (?::-
- (?P<default>[^\}]*)
- )?
- \}
- """,
- re.VERBOSE,
-) # type: Pattern[Text]
-
def with_warn_for_invalid_lines(mappings):
# type: (Iterator[Binding]) -> Iterator[Binding]
@@ -56,13 +43,14 @@ def with_warn_for_invalid_lines(mappings):
class DotEnv():
- def __init__(self, dotenv_path, verbose=False, encoding=None, interpolate=True):
- # type: (Union[Text, _PathLike, _StringIO], bool, Union[None, Text], bool) -> None
+ def __init__(self, dotenv_path, verbose=False, encoding=None, interpolate=True, override=True):
+ # type: (Union[Text, _PathLike, _StringIO], bool, Union[None, Text], bool, bool) -> None
self.dotenv_path = dotenv_path # type: Union[Text,_PathLike, _StringIO]
self._dict = None # type: Optional[Dict[Text, Optional[Text]]]
self.verbose = verbose # type: bool
self.encoding = encoding # type: Union[None, Text]
self.interpolate = interpolate # type: bool
+ self.override = override # type: bool
@contextmanager
def _get_stream(self):
@@ -83,13 +71,14 @@ class DotEnv():
if self._dict:
return self._dict
+ raw_values = self.parse()
+
if self.interpolate:
- values = resolve_nested_variables(self.parse())
+ self._dict = OrderedDict(resolve_variables(raw_values, override=self.override))
else:
- values = OrderedDict(self.parse())
+ self._dict = OrderedDict(raw_values)
- self._dict = values
- return values
+ return self._dict
def parse(self):
# type: () -> Iterator[Tuple[Text, Optional[Text]]]
@@ -98,13 +87,13 @@ class DotEnv():
if mapping.key is not None:
yield mapping.key, mapping.value
- def set_as_environment_variables(self, override=False):
- # type: (bool) -> bool
+ def set_as_environment_variables(self):
+ # type: () -> bool
"""
Load the current dotenv as system environemt variable.
"""
for k, v in self.dict().items():
- if k in os.environ and not override:
+ if k in os.environ and not self.override:
continue
if v is not None:
os.environ[to_env(k)] = to_env(v)
@@ -217,27 +206,26 @@ def unset_key(dotenv_path, key_to_unset, quote_mode="always"):
return removed, key_to_unset
-def resolve_nested_variables(values):
- # type: (Iterable[Tuple[Text, Optional[Text]]]) -> Dict[Text, Optional[Text]]
- def _replacement(name, default):
- # type: (Text, Optional[Text]) -> Text
- default = default if default is not None else ""
- ret = new_values.get(name, os.getenv(name, default))
- return ret # type: ignore
+def resolve_variables(values, override):
+ # type: (Iterable[Tuple[Text, Optional[Text]]], bool) -> Mapping[Text, Optional[Text]]
- def _re_sub_callback(match):
- # type: (Match[Text]) -> Text
- """
- From a match object gets the variable name and returns
- the correct replacement
- """
- matches = match.groupdict()
- return _replacement(name=matches["name"], default=matches["default"]) # type: ignore
+ new_values = {} # type: Dict[Text, Optional[Text]]
- new_values = {}
+ for (name, value) in values:
+ if value is None:
+ result = None
+ else:
+ atoms = parse_variables(value)
+ env = {} # type: Dict[Text, Optional[Text]]
+ if override:
+ env.update(os.environ) # type: ignore
+ env.update(new_values)
+ else:
+ env.update(new_values)
+ env.update(os.environ) # type: ignore
+ result = "".join(atom.resolve(env) for atom in atoms)
- for (k, v) in values:
- new_values[k] = __posix_variable.sub(_re_sub_callback, v) if v is not None else None
+ new_values[name] = result
return new_values
@@ -316,10 +304,11 @@ def load_dotenv(dotenv_path=None, stream=None, verbose=False, override=False, in
Defaults to `False`.
"""
f = dotenv_path or stream or find_dotenv()
- return DotEnv(f, verbose=verbose, interpolate=interpolate, **kwargs).set_as_environment_variables(override=override)
+ dotenv = DotEnv(f, verbose=verbose, interpolate=interpolate, override=override, **kwargs)
+ return dotenv.set_as_environment_variables()
def dotenv_values(dotenv_path=None, stream=None, verbose=False, interpolate=True, **kwargs):
# type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, bool, Union[None, Text]) -> Dict[Text, Optional[Text]] # noqa: E501
f = dotenv_path or stream or find_dotenv()
- return DotEnv(f, verbose=verbose, interpolate=interpolate, **kwargs).dict()
+ return DotEnv(f, verbose=verbose, interpolate=interpolate, override=True, **kwargs).dict()
diff --git a/src/dotenv/variables.py b/src/dotenv/variables.py
new file mode 100644
index 0000000..4828dfc
--- /dev/null
+++ b/src/dotenv/variables.py
@@ -0,0 +1,106 @@
+import re
+from abc import ABCMeta
+
+from .compat import IS_TYPE_CHECKING
+
+if IS_TYPE_CHECKING:
+ from typing import Iterator, Mapping, Optional, Pattern, Text
+
+
+_posix_variable = re.compile(
+ r"""
+ \$\{
+ (?P<name>[^\}:]*)
+ (?::-
+ (?P<default>[^\}]*)
+ )?
+ \}
+ """,
+ re.VERBOSE,
+) # type: Pattern[Text]
+
+
+class Atom():
+ __metaclass__ = ABCMeta
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ result = self.__eq__(other)
+ if result is NotImplemented:
+ return NotImplemented
+ return not result
+
+ def resolve(self, env):
+ # type: (Mapping[Text, Optional[Text]]) -> Text
+ raise NotImplementedError
+
+
+class Literal(Atom):
+ def __init__(self, value):
+ # type: (Text) -> None
+ self.value = value
+
+ def __repr__(self):
+ # type: () -> str
+ return "Literal(value={})".format(self.value)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self.value == other.value
+
+ def __hash__(self):
+ # type: () -> int
+ return hash((self.__class__, self.value))
+
+ def resolve(self, env):
+ # type: (Mapping[Text, Optional[Text]]) -> Text
+ return self.value
+
+
+class Variable(Atom):
+ def __init__(self, name, default):
+ # type: (Text, Optional[Text]) -> None
+ self.name = name
+ self.default = default
+
+ def __repr__(self):
+ # type: () -> str
+ return "Variable(name={}, default={})".format(self.name, self.default)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return (self.name, self.default) == (other.name, other.default)
+
+ def __hash__(self):
+ # type: () -> int
+ return hash((self.__class__, self.name, self.default))
+
+ def resolve(self, env):
+ # type: (Mapping[Text, Optional[Text]]) -> Text
+ default = self.default if self.default is not None else ""
+ result = env.get(self.name, default)
+ return result if result is not None else ""
+
+
+def parse_variables(value):
+ # type: (Text) -> Iterator[Atom]
+ cursor = 0
+
+ for match in _posix_variable.finditer(value):
+ (start, end) = match.span()
+ name = match.groupdict()["name"]
+ default = match.groupdict()["default"]
+
+ if start > cursor:
+ yield Literal(value=value[cursor:start])
+
+ yield Variable(name=name, default=default)
+ cursor = end
+
+ length = len(value)
+ if cursor < length:
+ yield Literal(value=value[cursor:length])
|
theskumar/python-dotenv
|
e13d957bf48224453c5d9d9a7a83a13b999e0196
|
diff --git a/tests/test_main.py b/tests/test_main.py
index 6b9458d..b927d7f 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -257,6 +257,28 @@ def test_load_dotenv_existing_variable_override(dotenv_file):
assert os.environ == {"a": "b"}
[email protected](os.environ, {"a": "c"}, clear=True)
+def test_load_dotenv_redefine_var_used_in_file_no_override(dotenv_file):
+ with open(dotenv_file, "w") as f:
+ f.write('a=b\nd="${a}"')
+
+ result = dotenv.load_dotenv(dotenv_file)
+
+ assert result is True
+ assert os.environ == {"a": "c", "d": "c"}
+
+
[email protected](os.environ, {"a": "c"}, clear=True)
+def test_load_dotenv_redefine_var_used_in_file_with_override(dotenv_file):
+ with open(dotenv_file, "w") as f:
+ f.write('a=b\nd="${a}"')
+
+ result = dotenv.load_dotenv(dotenv_file, override=True)
+
+ assert result is True
+ assert os.environ == {"a": "b", "d": "b"}
+
+
@mock.patch.dict(os.environ, {}, clear=True)
def test_load_dotenv_utf_8():
stream = StringIO("a=à")
diff --git a/tests/test_variables.py b/tests/test_variables.py
new file mode 100644
index 0000000..86b0646
--- /dev/null
+++ b/tests/test_variables.py
@@ -0,0 +1,35 @@
+import pytest
+
+from dotenv.variables import Literal, Variable, parse_variables
+
+
[email protected](
+ "value,expected",
+ [
+ ("", []),
+ ("a", [Literal(value="a")]),
+ ("${a}", [Variable(name="a", default=None)]),
+ ("${a:-b}", [Variable(name="a", default="b")]),
+ (
+ "${a}${b}",
+ [
+ Variable(name="a", default=None),
+ Variable(name="b", default=None),
+ ],
+ ),
+ (
+ "a${b}c${d}e",
+ [
+ Literal(value="a"),
+ Variable(name="b", default=None),
+ Literal(value="c"),
+ Variable(name="d", default=None),
+ Literal(value="e"),
+ ],
+ ),
+ ]
+)
+def test_parse_variables(value, expected):
+ result = parse_variables(value)
+
+ assert list(result) == expected
|
Inconsistency in priorities of variable expansion
# Behavior
python-dotenv's priorities are correctly documented, but the behavior is inconsistent when loading a .env file with internal dependencies in variable expansion.
Consider the following `.env` file:
```
HOSTNAME=localhost:8080
URL=http://${HOSTNAME}
```
Currently, if we set the `HOSTNAME` environment variable to `localhost:8081`, then run `load_dotenv()`, the environment ends up with `HOSTNAME=localhost:8081` and `URL=http://localhost:8080`, which at first is surprising.
# Underlying reason
The reason for this is that (by default) `load_dotenv()` does not override already-set environment variables, but when doing variable expansion inside a given file, `load_dotenv` prefers the variable as set in the file over the one from the environment (as documented in https://github.com/theskumar/python-dotenv#usages).
# Workaround
A workaround consists of changing the priorities in the file explicitly, as in this example:
```
HOSTNAME=${HOSTNAME:-localhost:8080}
URL=http://${HOSTNAME}
```
# Suggestion
I would suggest that the variable expansion priority logic follow the same behavior as the general "overriding or not" behavior. More precisely:
- when `override==False`, variable expansion should follow the following priorities:
- Value of that variable in the environment.
- Value of that variable in the .env file.
- Default value, if provided.
- Empty string.
- when `override==True`, variable expansion should follow the currently documented priorities:
- Value of that variable in the .env file.
- Value of that variable in the environment.
- Default value, if provided.
- Empty string.
This would be a breaking change in behavior, but it seems that, in the cases where it matters, the current behavior is probably not what users expect.
Is that a change you'd be willing to accept?
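A minimal sketch of the suggested lookup order (illustrative only; `resolve` is not proposed API):
```python
import os

def resolve(name, file_values, override, default=""):
    # Later updates win: with override=True the file shadows the
    # environment; with override=False the environment shadows the file.
    env = {}
    if override:
        env.update(os.environ)
        env.update(file_values)
    else:
        env.update(file_values)
        env.update(os.environ)
    return env.get(name, default)
```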
|
0.0
|
e13d957bf48224453c5d9d9a7a83a13b999e0196
|
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key[-a--expected0-a=\"\"\\n]",
"tests/test_main.py::test_set_key[-a-b-expected1-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-'b'-expected2-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-\"b\"-expected3-a=\"b\"\\n]",
"tests/test_main.py::test_set_key[-a-b'c-expected4-a=\"b'c\"\\n]",
"tests/test_main.py::test_set_key[-a-b\"c-expected5-a=\"b\\\\\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b-a-c-expected6-a=\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a=\"c\"\\n]",
"tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a=\"c\"\\n\\n]",
"tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a=\"e\"\\nc=d]",
"tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc=\"g\"\\ne=f]",
"tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc=\"d\"\\n]",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_no_file_verbose",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override",
"tests/test_main.py::test_load_dotenv_utf_8",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_stream[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_stream[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_stream[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_stream[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_stream[env4-a=${b:-d}-False-expected4]",
"tests/test_main.py::test_dotenv_values_stream[env5-a=${b:-d}-True-expected5]",
"tests/test_main.py::test_dotenv_values_stream[env6-b=c\\na=${b}-True-expected6]",
"tests/test_main.py::test_dotenv_values_stream[env7-a=${b}-True-expected7]",
"tests/test_main.py::test_dotenv_values_stream[env8-a=${b:-d}-True-expected8]",
"tests/test_main.py::test_dotenv_values_stream[env9-a=\"${b}\"-True-expected9]",
"tests/test_main.py::test_dotenv_values_stream[env10-a='${b}'-True-expected10]",
"tests/test_main.py::test_dotenv_values_stream[env11-a=x${b}y-True-expected11]",
"tests/test_main.py::test_dotenv_values_stream[env12-a=${a}-True-expected12]",
"tests/test_main.py::test_dotenv_values_stream[env13-a=${a}-True-expected13]",
"tests/test_main.py::test_dotenv_values_stream[env14-a=${a:-c}-True-expected14]",
"tests/test_main.py::test_dotenv_values_stream[env15-a=${a:-c}-True-expected15]",
"tests/test_main.py::test_dotenv_values_stream[env16-a=${b}${b}-True-expected16]",
"tests/test_main.py::test_dotenv_values_stream[env17-b=d\\na=${b}-True-expected17]",
"tests/test_main.py::test_dotenv_values_stream[env18-a=b\\na=c\\nd=${a}-True-expected18]",
"tests/test_main.py::test_dotenv_values_stream[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]",
"tests/test_variables.py::test_parse_variables[-expected0]",
"tests/test_variables.py::test_parse_variables[a-expected1]",
"tests/test_variables.py::test_parse_variables[${a}-expected2]",
"tests/test_variables.py::test_parse_variables[${a:-b}-expected3]",
"tests/test_variables.py::test_parse_variables[${a}${b}-expected4]",
"tests/test_variables.py::test_parse_variables[a${b}c${d}e-expected5]"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-15 14:30:17+00:00
|
bsd-3-clause
| 5,887 |
|
theskumar__python-dotenv-348
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f52cf07..cea2053 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,16 +7,19 @@ project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
-### Added
-
-- The `dotenv_path` argument of `set_key` and `unset_key` now has a type of `Union[str,
- os.PathLike]` instead of just `os.PathLike` (#347 by [@bbc2]).
-
### Changed
- Require Python 3.5 or a later version. Python 2 and 3.4 are no longer supported. (#341
by [@bbc2]).
+### Added
+
+- The `dotenv_path` argument of `set_key` and `unset_key` now has a type of `Union[str,
+ os.PathLike]` instead of just `os.PathLike` (#347 by [@bbc2]).
+- The `stream` argument of `load_dotenv` and `dotenv_values` can now be a text stream
+ (`IO[str]`), which includes values like `io.StringIO("foo")` and `open("file.env",
+ "r")` (#348 by [@bbc2]).
+
## [0.18.0] - 2021-06-20
### Changed
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index d550f6f..b8d0a4e 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -33,13 +33,15 @@ def with_warn_for_invalid_lines(mappings: Iterator[Binding]) -> Iterator[Binding
class DotEnv():
def __init__(
self,
- dotenv_path: Union[str, _PathLike, io.StringIO],
+ dotenv_path: Optional[Union[str, _PathLike]],
+ stream: Optional[IO[str]] = None,
verbose: bool = False,
encoding: Union[None, str] = None,
interpolate: bool = True,
override: bool = True,
) -> None:
- self.dotenv_path = dotenv_path # type: Union[str,_PathLike, io.StringIO]
+ self.dotenv_path = dotenv_path # type: Optional[Union[str, _PathLike]]
+ self.stream = stream # type: Optional[IO[str]]
self._dict = None # type: Optional[Dict[str, Optional[str]]]
self.verbose = verbose # type: bool
self.encoding = encoding # type: Union[None, str]
@@ -48,14 +50,17 @@ class DotEnv():
@contextmanager
def _get_stream(self) -> Iterator[IO[str]]:
- if isinstance(self.dotenv_path, io.StringIO):
- yield self.dotenv_path
- elif os.path.isfile(self.dotenv_path):
+ if self.dotenv_path and os.path.isfile(self.dotenv_path):
with io.open(self.dotenv_path, encoding=self.encoding) as stream:
yield stream
+ elif self.stream is not None:
+ yield self.stream
else:
if self.verbose:
- logger.info("Python-dotenv could not find configuration file %s.", self.dotenv_path or '.env')
+ logger.info(
+ "Python-dotenv could not find configuration file %s.",
+ self.dotenv_path or '.env',
+ )
yield io.StringIO('')
def dict(self) -> Dict[str, Optional[str]]:
@@ -290,7 +295,7 @@ def find_dotenv(
def load_dotenv(
dotenv_path: Union[str, _PathLike, None] = None,
- stream: Optional[io.StringIO] = None,
+ stream: Optional[IO[str]] = None,
verbose: bool = False,
override: bool = False,
interpolate: bool = True,
@@ -299,7 +304,8 @@ def load_dotenv(
"""Parse a .env file and then load all the variables found as environment variables.
- *dotenv_path*: absolute or relative path to .env file.
- - *stream*: `StringIO` object with .env content, used if `dotenv_path` is `None`.
+ - *stream*: Text stream (such as `io.StringIO`) with .env content, used if
+ `dotenv_path` is `None`.
- *verbose*: whether to output a warning the .env file is missing. Defaults to
`False`.
- *override*: whether to override the system environment variables with the variables
@@ -308,9 +314,12 @@ def load_dotenv(
If both `dotenv_path` and `stream`, `find_dotenv()` is used to find the .env file.
"""
- f = dotenv_path or stream or find_dotenv()
+ if dotenv_path is None and stream is None:
+ dotenv_path = find_dotenv()
+
dotenv = DotEnv(
- f,
+ dotenv_path=dotenv_path,
+ stream=stream,
verbose=verbose,
interpolate=interpolate,
override=override,
@@ -321,7 +330,7 @@ def load_dotenv(
def dotenv_values(
dotenv_path: Union[str, _PathLike, None] = None,
- stream: Optional[io.StringIO] = None,
+ stream: Optional[IO[str]] = None,
verbose: bool = False,
interpolate: bool = True,
encoding: Optional[str] = "utf-8",
@@ -338,9 +347,12 @@ def dotenv_values(
If both `dotenv_path` and `stream`, `find_dotenv()` is used to find the .env file.
"""
- f = dotenv_path or stream or find_dotenv()
+ if dotenv_path is None and stream is None:
+ dotenv_path = find_dotenv()
+
return DotEnv(
- f,
+ dotenv_path=dotenv_path,
+ stream=stream,
verbose=verbose,
interpolate=interpolate,
override=True,
|
theskumar/python-dotenv
|
955e2a4ea6391a322c779e737f5a7aca7eaa963d
|
diff --git a/tests/test_main.py b/tests/test_main.py
index d612bb2..13e2791 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -277,7 +277,7 @@ def test_load_dotenv_redefine_var_used_in_file_with_override(dotenv_file):
@mock.patch.dict(os.environ, {}, clear=True)
-def test_load_dotenv_utf_8():
+def test_load_dotenv_string_io_utf_8():
stream = io.StringIO("a=à")
result = dotenv.load_dotenv(stream=stream)
@@ -286,6 +286,18 @@ def test_load_dotenv_utf_8():
assert os.environ == {"a": "à"}
[email protected](os.environ, {}, clear=True)
+def test_load_dotenv_file_stream(dotenv_file):
+ with open(dotenv_file, "w") as f:
+ f.write("a=b")
+
+ with open(dotenv_file, "r") as f:
+ result = dotenv.load_dotenv(stream=f)
+
+ assert result is True
+ assert os.environ == {"a": "b"}
+
+
def test_load_dotenv_in_current_dir(tmp_path):
dotenv_path = tmp_path / '.env'
dotenv_path.write_bytes(b'a=b')
@@ -353,7 +365,7 @@ def test_dotenv_values_file(dotenv_file):
({}, "a=b\nc=${a}\nd=e\nc=${d}", True, {"a": "b", "c": "e", "d": "e"}),
],
)
-def test_dotenv_values_stream(env, string, interpolate, expected):
+def test_dotenv_values_string_io(env, string, interpolate, expected):
with mock.patch.dict(os.environ, env, clear=True):
stream = io.StringIO(string)
stream.seek(0)
@@ -361,3 +373,13 @@ def test_dotenv_values_stream(env, string, interpolate, expected):
result = dotenv.dotenv_values(stream=stream, interpolate=interpolate)
assert result == expected
+
+
+def test_dotenv_values_file_stream(dotenv_file):
+ with open(dotenv_file, "w") as f:
+ f.write("a=b")
+
+ with open(dotenv_file, "r") as f:
+ result = dotenv.dotenv_values(stream=f)
+
+ assert result == {"a": "b"}
|
Stream as any text file?
As noted before in #208 and #156, the type annotation at
https://github.com/theskumar/python-dotenv/blob/a7fe93f6cc73ab9de28191e3854f1a713d53363b/src/dotenv/main.py#L301
is unusually restrictive.
It'd be useful to accept any text file as a stream.
My use case would be packaging a `.env.defaults` file opened by [importlib.resources.open_text](https://docs.python.org/3/library/importlib.html#importlib.resources.open_text), so you can uniformly manage/document environment & defaults via `.env` files.
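For example (hypothetical usage; `mypackage` and `.env.defaults` are placeholders):
```python
from importlib import resources
from dotenv import dotenv_values

# Any open text stream should work, including a packaged resource.
with resources.open_text("mypackage", ".env.defaults") as stream:
    defaults = dotenv_values(stream=stream)
```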
|
0.0
|
955e2a4ea6391a322c779e737f5a7aca7eaa963d
|
[
"tests/test_main.py::test_load_dotenv_file_stream",
"tests/test_main.py::test_dotenv_values_file_stream"
] |
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key[-a--expected0-a=''\\n]",
"tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]",
"tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]",
"tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]",
"tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]",
"tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]",
"tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]",
"tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]",
"tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]",
"tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_no_file_verbose",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override",
"tests/test_main.py::test_load_dotenv_string_io_utf_8",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]",
"tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]",
"tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]",
"tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]",
"tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]",
"tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]",
"tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]",
"tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]",
"tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]",
"tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]",
"tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]",
"tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]",
"tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]",
"tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]",
"tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]",
"tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-14 09:01:18+00:00
|
bsd-3-clause
| 5,888 |
|
theskumar__python-dotenv-361
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6b2b2bb..811ed1a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this
project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## Unreleased
+
+### Fixed
+
+- In `set_key`, add missing newline character before new entry if necessary. (#361 by
+ [@bbc2])
+
## [0.19.1] - 2021-08-09
### Added
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index b8d0a4e..d867f02 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -167,13 +167,17 @@ def set_key(
with rewrite(dotenv_path) as (source, dest):
replaced = False
+ missing_newline = False
for mapping in with_warn_for_invalid_lines(parse_stream(source)):
if mapping.key == key_to_set:
dest.write(line_out)
replaced = True
else:
dest.write(mapping.original.string)
+ missing_newline = not mapping.original.string.endswith("\n")
if not replaced:
+ if missing_newline:
+ dest.write("\n")
dest.write(line_out)
return True, key_to_set, value_to_set
|
theskumar/python-dotenv
|
fc138ce8a430b758f4f2c89bc8104f259e2cba38
|
diff --git a/tests/test_main.py b/tests/test_main.py
index 13e2791..541ac5e 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -37,6 +37,7 @@ def test_set_key_no_file(tmp_path):
("a=b\nc=d", "a", "e", (True, "a", "e"), "a='e'\nc=d"),
("a=b\nc=d\ne=f", "c", "g", (True, "c", "g"), "a=b\nc='g'\ne=f"),
("a=b\n", "c", "d", (True, "c", "d"), "a=b\nc='d'\n"),
+ ("a=b", "c", "d", (True, "c", "d"), "a=b\nc='d'\n"),
],
)
def test_set_key(dotenv_file, before, key, value, expected, after):
|
set_key does not check if there is a trailing newline before inserting a new key
Example script :
```
import os
import dotenv
DOTENV_FILE = os.path.join(os.path.dirname(__file__), ".env")
def init_dotenv():
with open(DOTENV_FILE, "w") as fp:
fp.write("TEST=ABCD")
def print_dotenv():
with open(DOTENV_FILE) as fp:
content = fp.read()
print("-" * 30, "DOTENV", "-" * 30)
print(content)
print()
init_dotenv()
print_dotenv()
dotenv.set_key(DOTENV_FILE, "FOO", "1234")
print_dotenv()
```
Output
```
------------------------------ DOTENV ------------------------------
TEST=ABCD
------------------------------ DOTENV ------------------------------
TEST=ABCDFOO='1234'
```
Is this something you want to fix ?
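For reference, a sketch of the behaviour the patch establishes, extrapolated from the new parametrized test case (`"a=b"` plus key `c`/value `d` yields `"a=b\nc='d'\n"`):
```python
# Post-fix behaviour sketch: set_key now writes a newline before the new
# entry when the file's last line lacks one (per the added test case).
import dotenv

with open(".env", "w") as f:
    f.write("TEST=ABCD")  # no trailing newline

dotenv.set_key(".env", "FOO", "1234")

with open(".env") as f:
    assert f.read() == "TEST=ABCD\nFOO='1234'\n"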
|
0.0
|
fc138ce8a430b758f4f2c89bc8104f259e2cba38
|
[
"tests/test_main.py::test_set_key[a=b-c-d-expected12-a=b\\nc='d'\\n]"
] |
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key[-a--expected0-a=''\\n]",
"tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]",
"tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]",
"tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]",
"tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]",
"tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]",
"tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]",
"tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]",
"tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]",
"tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_no_file_verbose",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override",
"tests/test_main.py::test_load_dotenv_string_io_utf_8",
"tests/test_main.py::test_load_dotenv_file_stream",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]",
"tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]",
"tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]",
"tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]",
"tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]",
"tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]",
"tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]",
"tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]",
"tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]",
"tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]",
"tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]",
"tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]",
"tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]",
"tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]",
"tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]",
"tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]",
"tests/test_main.py::test_dotenv_values_file_stream"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-23 09:42:48+00:00
|
bsd-3-clause
| 5,889 |
|
theskumar__python-dotenv-379
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9b18856..3d4d014 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this
project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## Unreleased
+
+### Added
+
+- Add `encoding` (`Optional[str]`) parameter to `get_key`, `set_key` and `unset_key`.
+ (#379 by [@bbc2])
+
## [0.19.2] - 2021-11-11
### Fixed
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index d867f02..20ac61b 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -109,23 +109,30 @@ class DotEnv():
return None
-def get_key(dotenv_path: Union[str, _PathLike], key_to_get: str) -> Optional[str]:
+def get_key(
+ dotenv_path: Union[str, _PathLike],
+ key_to_get: str,
+ encoding: Optional[str] = "utf-8",
+) -> Optional[str]:
"""
- Gets the value of a given key from the given .env
+ Get the value of a given key from the given .env.
- If the .env path given doesn't exist, fails
+ Returns `None` if the key isn't found or doesn't have a value.
"""
- return DotEnv(dotenv_path, verbose=True).get(key_to_get)
+ return DotEnv(dotenv_path, verbose=True, encoding=encoding).get(key_to_get)
@contextmanager
-def rewrite(path: Union[str, _PathLike]) -> Iterator[Tuple[IO[str], IO[str]]]:
+def rewrite(
+ path: Union[str, _PathLike],
+ encoding: Optional[str],
+) -> Iterator[Tuple[IO[str], IO[str]]]:
try:
if not os.path.isfile(path):
- with io.open(path, "w+") as source:
+ with io.open(path, "w+", encoding=encoding) as source:
source.write("")
- with tempfile.NamedTemporaryFile(mode="w+", delete=False) as dest:
- with io.open(path) as source:
+ with tempfile.NamedTemporaryFile(mode="w+", delete=False, encoding=encoding) as dest:
+ with io.open(path, encoding=encoding) as source:
yield (source, dest) # type: ignore
except BaseException:
if os.path.isfile(dest.name):
@@ -141,6 +148,7 @@ def set_key(
value_to_set: str,
quote_mode: str = "always",
export: bool = False,
+ encoding: Optional[str] = "utf-8",
) -> Tuple[Optional[bool], str, str]:
"""
Adds or Updates a key/value to the given .env
@@ -165,7 +173,7 @@ def set_key(
else:
line_out = "{}={}\n".format(key_to_set, value_out)
- with rewrite(dotenv_path) as (source, dest):
+ with rewrite(dotenv_path, encoding=encoding) as (source, dest):
replaced = False
missing_newline = False
for mapping in with_warn_for_invalid_lines(parse_stream(source)):
@@ -187,6 +195,7 @@ def unset_key(
dotenv_path: Union[str, _PathLike],
key_to_unset: str,
quote_mode: str = "always",
+ encoding: Optional[str] = "utf-8",
) -> Tuple[Optional[bool], str]:
"""
Removes a given key from the given .env
@@ -199,7 +208,7 @@ def unset_key(
return None, key_to_unset
removed = False
- with rewrite(dotenv_path) as (source, dest):
+ with rewrite(dotenv_path, encoding=encoding) as (source, dest):
for mapping in with_warn_for_invalid_lines(parse_stream(source)):
if mapping.key == key_to_unset:
removed = True
|
theskumar/python-dotenv
|
ba9408c5048e8e512318df423541d2b44ac6019f
|
diff --git a/tests/test_main.py b/tests/test_main.py
index 541ac5e..364fc24 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -53,6 +53,15 @@ def test_set_key(dotenv_file, before, key, value, expected, after):
mock_warning.assert_not_called()
+def test_set_key_encoding(dotenv_file):
+ encoding = "latin-1"
+
+ result = dotenv.set_key(dotenv_file, "a", "é", encoding=encoding)
+
+ assert result == (True, "a", "é")
+ assert open(dotenv_file, "r", encoding=encoding).read() == "a='é'\n"
+
+
def test_set_key_permission_error(dotenv_file):
os.chmod(dotenv_file, 0o000)
@@ -107,6 +116,16 @@ def test_get_key_ok(dotenv_file):
mock_warning.assert_not_called()
+def test_get_key_encoding(dotenv_file):
+ encoding = "latin-1"
+ with open(dotenv_file, "w", encoding=encoding) as f:
+ f.write("é=è")
+
+ result = dotenv.get_key(dotenv_file, "é", encoding=encoding)
+
+ assert result == "è"
+
+
def test_get_key_none(dotenv_file):
logger = logging.getLogger("dotenv.main")
with open(dotenv_file, "w") as f:
@@ -147,6 +166,18 @@ def test_unset_no_value(dotenv_file):
mock_warning.assert_not_called()
+def test_unset_encoding(dotenv_file):
+ encoding = "latin-1"
+ with open(dotenv_file, "w", encoding=encoding) as f:
+ f.write("é=x")
+
+ result = dotenv.unset_key(dotenv_file, "é", encoding=encoding)
+
+ assert result == (True, "é")
+ with open(dotenv_file, "r", encoding=encoding) as f:
+ assert f.read() == ""
+
+
def test_unset_non_existent_file(tmp_path):
nx_file = str(tmp_path / "nx")
logger = logging.getLogger("dotenv.main")
|
encoding not an option using set_key
You can create and read a .env file using the main.DotEnv class with an encoding, but the functions for setting and unsetting keys neither honour the configured encoding nor offer an option to set it; they both use the rewrite context manager.
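A short sketch of the API the patch adds: an optional `encoding` parameter (default `"utf-8"`) on `get_key`, `set_key` and `unset_key`, mirroring the new tests:
```python
# Mirrors the new tests: read/write a latin-1 encoded .env file.
import dotenv

dotenv.set_key(".env", "a", "é", encoding="latin-1")
assert dotenv.get_key(".env", "a", encoding="latin-1") == "é"
dotenv.unset_key(".env", "a", encoding="latin-1")
```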
|
0.0
|
ba9408c5048e8e512318df423541d2b44ac6019f
|
[
"tests/test_main.py::test_set_key_encoding",
"tests/test_main.py::test_get_key_encoding",
"tests/test_main.py::test_unset_encoding"
] |
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key[-a--expected0-a=''\\n]",
"tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]",
"tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]",
"tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]",
"tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]",
"tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]",
"tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]",
"tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]",
"tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]",
"tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]",
"tests/test_main.py::test_set_key[a=b-c-d-expected12-a=b\\nc='d'\\n]",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_no_file_verbose",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override",
"tests/test_main.py::test_load_dotenv_string_io_utf_8",
"tests/test_main.py::test_load_dotenv_file_stream",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]",
"tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]",
"tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]",
"tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]",
"tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]",
"tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]",
"tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]",
"tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]",
"tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]",
"tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]",
"tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]",
"tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]",
"tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]",
"tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]",
"tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]",
"tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]",
"tests/test_main.py::test_dotenv_values_file_stream"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-02-20 10:09:15+00:00
|
bsd-3-clause
| 5,890 |
|
theskumar__python-dotenv-388
|
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index e7ad430..7841066 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -87,6 +87,9 @@ class DotEnv():
"""
Load the current dotenv as system environment variable.
"""
+ if not self.dict():
+ return False
+
for k, v in self.dict().items():
if k in os.environ and not self.override:
continue
@@ -324,6 +327,8 @@ def load_dotenv(
override: Whether to override the system environment variables with the variables
from the `.env` file.
encoding: Encoding to be used to read the file.
+ Returns:
+ Bool: True if at least one environment variable is set, else False
If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
.env file.
|
theskumar/python-dotenv
|
29bceb836965de5bc498af401fd9d2e95194a5c1
|
diff --git a/tests/test_main.py b/tests/test_main.py
index ca14b1a..82c73ba 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -259,8 +259,9 @@ def test_load_dotenv_no_file_verbose():
logger = logging.getLogger("dotenv.main")
with mock.patch.object(logger, "info") as mock_info:
- dotenv.load_dotenv('.does_not_exist', verbose=True)
+ result = dotenv.load_dotenv('.does_not_exist', verbose=True)
+ assert result is False
mock_info.assert_called_once_with("Python-dotenv could not find configuration file %s.", ".does_not_exist")
|
load_dotenv() returns True even if .env file is not found
This behaviour is not ideal: if the configuration file is not found in the filesystem, it would be better to return False.
In this way the user can write something like:
```python
if load_dotenv():
    ...  # do stuff
else:
    print("No file .env found")
```
# Steps to reproduce
```
In [1]: import dotenv
In [2]: dotenv.load_dotenv()
Out[2]: True
In [3]: dotenv.find_dotenv()
Out[3]: ''
```
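With the patch applied, the sketch below (mirroring the updated test) holds, since `load_dotenv` now returns `False` when no variable was set:
```python
import dotenv

# No .env file found anywhere: the patched load_dotenv reports failure.
assert dotenv.load_dotenv(".does_not_exist", verbose=True) is False
```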
|
0.0
|
29bceb836965de5bc498af401fd9d2e95194a5c1
|
[
"tests/test_main.py::test_load_dotenv_no_file_verbose"
] |
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key[-a--expected0-a=''\\n]",
"tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]",
"tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]",
"tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]",
"tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]",
"tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]",
"tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]",
"tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]",
"tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]",
"tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]",
"tests/test_main.py::test_set_key[a=b-c-d-expected12-a=b\\nc='d'\\n]",
"tests/test_main.py::test_set_key_encoding",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_encoding",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_encoding",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override",
"tests/test_main.py::test_load_dotenv_string_io_utf_8",
"tests/test_main.py::test_load_dotenv_file_stream",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]",
"tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]",
"tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]",
"tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]",
"tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]",
"tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]",
"tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]",
"tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]",
"tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]",
"tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]",
"tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]",
"tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]",
"tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]",
"tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]",
"tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]",
"tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]",
"tests/test_main.py::test_dotenv_values_file_stream"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-03-15 01:49:41+00:00
|
bsd-3-clause
| 5,891 |
|
theskumar__python-dotenv-407
|
diff --git a/README.md b/README.md
index eb6bb53..a9d19bf 100644
--- a/README.md
+++ b/README.md
@@ -146,6 +146,11 @@ $ dotenv set EMAIL [email protected]
$ dotenv list
USER=foo
[email protected]
+$ dotenv list --format=json
+{
+ "USER": "foo",
+ "EMAIL": "[email protected]"
+}
$ dotenv run -- python foo.py
```
diff --git a/src/dotenv/cli.py b/src/dotenv/cli.py
index 3411e34..b845b95 100644
--- a/src/dotenv/cli.py
+++ b/src/dotenv/cli.py
@@ -1,4 +1,6 @@
+import json
import os
+import shlex
import sys
from subprocess import Popen
from typing import Any, Dict, List
@@ -36,7 +38,11 @@ def cli(ctx: click.Context, file: Any, quote: Any, export: Any) -> None:
@cli.command()
@click.pass_context
-def list(ctx: click.Context) -> None:
[email protected]('--format', default='simple',
+ type=click.Choice(['simple', 'json', 'shell', 'export']),
+ help="The format in which to display the list. Default format is simple, "
+ "which displays name=value without quotes.")
+def list(ctx: click.Context, format: bool) -> None:
'''Display all the stored key/value.'''
file = ctx.obj['FILE']
if not os.path.isfile(file):
@@ -45,8 +51,16 @@ def list(ctx: click.Context) -> None:
ctx=ctx
)
dotenv_as_dict = dotenv_values(file)
- for k, v in dotenv_as_dict.items():
- click.echo('%s=%s' % (k, v))
+ if format == 'json':
+ click.echo(json.dumps(dotenv_as_dict, indent=2, sort_keys=True))
+ else:
+ prefix = 'export ' if format == 'export' else ''
+ for k in sorted(dotenv_as_dict):
+ v = dotenv_as_dict[k]
+ if v is not None:
+ if format in ('export', 'shell'):
+ v = shlex.quote(v)
+ click.echo('%s%s=%s' % (prefix, k, v))
@cli.command()
|
theskumar/python-dotenv
|
2f36c082c278bad1a84411f1ad61547f95cecdb8
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 223476f..ca5ba2a 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -2,19 +2,38 @@ import os
import pytest
import sh
-
+from typing import Optional
import dotenv
from dotenv.cli import cli as dotenv_cli
from dotenv.version import __version__
-def test_list(cli, dotenv_file):
[email protected](
+ "format,content,expected",
+ (
+ (None, "x='a b c'", '''x=a b c\n'''),
+ ("simple", "x='a b c'", '''x=a b c\n'''),
+ ("simple", """x='"a b c"'""", '''x="a b c"\n'''),
+ ("simple", '''x="'a b c'"''', '''x='a b c'\n'''),
+ ("json", "x='a b c'", '''{\n "x": "a b c"\n}\n'''),
+ ("shell", "x='a b c'", "x='a b c'\n"),
+ ("shell", """x='"a b c"'""", '''x='"a b c"'\n'''),
+ ("shell", '''x="'a b c'"''', '''x=''"'"'a b c'"'"''\n'''),
+ ("shell", "x='a\nb\nc'", "x='a\nb\nc'\n"),
+ ("export", "x='a b c'", '''export x='a b c'\n'''),
+ )
+)
+def test_list(cli, dotenv_file, format: Optional[str], content: str, expected: str):
with open(dotenv_file, "w") as f:
- f.write("a=b")
+ f.write(content + '\n')
+
+ args = ['--file', dotenv_file, 'list']
+ if format is not None:
+ args.extend(['--format', format])
- result = cli.invoke(dotenv_cli, ['--file', dotenv_file, 'list'])
+ result = cli.invoke(dotenv_cli, args)
- assert (result.exit_code, result.output) == (0, result.output)
+ assert (result.exit_code, result.output) == (0, expected)
def test_list_non_existent_file(cli):
|
--format= option for CLI list command
It would be nice to have a way to dump all variables as JSON, which can be consumed by many other tools. I'd propose adding a --json or -j option to the "list" command.
I'd be happy to submit a pull request if it sounds interesting.
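A sketch of driving the new option from Python with click's test runner, the same way `tests/test_cli.py` invokes the command (the `.env` contents here are illustrative):
```python
# Sketch: exercise `dotenv list --format=json` via click's CliRunner.
from click.testing import CliRunner

from dotenv.cli import cli as dotenv_cli

with open(".env", "w") as f:
    f.write("USER=foo\n")

runner = CliRunner()
result = runner.invoke(dotenv_cli, ["--file", ".env", "list", "--format", "json"])
print(result.output)  # {"USER": "foo"} pretty-printed by json.dumps(indent=2)
```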
|
0.0
|
2f36c082c278bad1a84411f1ad61547f95cecdb8
|
[
"tests/test_cli.py::test_list[simple-x='a",
"tests/test_cli.py::test_list[simple-x='\"a",
"tests/test_cli.py::test_list[simple-x=\"'a",
"tests/test_cli.py::test_list[json-x='a",
"tests/test_cli.py::test_list[shell-x='a",
"tests/test_cli.py::test_list[shell-x='\"a",
"tests/test_cli.py::test_list[shell-x=\"'a",
"tests/test_cli.py::test_list[shell-x='a\\nb\\nc'-x='a\\nb\\nc'\\n]",
"tests/test_cli.py::test_list[export-x='a"
] |
[
"tests/test_cli.py::test_list[None-x='a",
"tests/test_cli.py::test_list_non_existent_file",
"tests/test_cli.py::test_list_no_file",
"tests/test_cli.py::test_get_existing_value",
"tests/test_cli.py::test_get_non_existent_value",
"tests/test_cli.py::test_get_no_file",
"tests/test_cli.py::test_unset_existing_value",
"tests/test_cli.py::test_unset_non_existent_value",
"tests/test_cli.py::test_set_quote_options[always-a-x-a='x'\\n]",
"tests/test_cli.py::test_set_quote_options[never-a-x-a=x\\n]",
"tests/test_cli.py::test_set_quote_options[auto-a-x-a=x\\n]",
"tests/test_cli.py::test_set_quote_options[auto-a-x",
"tests/test_cli.py::test_set_quote_options[auto-a-$-a='$'\\n]",
"tests/test_cli.py::test_set_export[.nx_file-true-a-x-export",
"tests/test_cli.py::test_set_export[.nx_file-false-a-x-a='x'\\n]",
"tests/test_cli.py::test_set_non_existent_file",
"tests/test_cli.py::test_set_no_file",
"tests/test_cli.py::test_run_with_other_env",
"tests/test_cli.py::test_run_without_cmd",
"tests/test_cli.py::test_run_with_invalid_cmd",
"tests/test_cli.py::test_run_with_version"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-19 20:16:19+00:00
|
bsd-3-clause
| 5,892 |
|
theskumar__python-dotenv-414
|
diff --git a/README.md b/README.md
index a9d19bf..983b7d1 100644
--- a/README.md
+++ b/README.md
@@ -163,7 +163,7 @@ The format is not formally specified and still improves over time. That being s
Keys can be unquoted or single-quoted. Values can be unquoted, single- or double-quoted.
Spaces before and after keys, equal signs, and values are ignored. Values can be followed
-by a comment. Lines can start with the `export` directive, which has no effect on their
+by a comment. Lines can start with the `export` directive, which does not affect their
interpretation.
Allowed escape sequences:
diff --git a/src/dotenv/main.py b/src/dotenv/main.py
index 05d377a..3321788 100644
--- a/src/dotenv/main.py
+++ b/src/dotenv/main.py
@@ -125,15 +125,16 @@ def rewrite(
path: Union[str, os.PathLike],
encoding: Optional[str],
) -> Iterator[Tuple[IO[str], IO[str]]]:
+ dest = None
try:
if not os.path.isfile(path):
with open(path, "w+", encoding=encoding) as source:
source.write("")
- with tempfile.NamedTemporaryFile(mode="w+", delete=False, encoding=encoding) as dest:
- with open(path, encoding=encoding) as source:
- yield (source, dest) # type: ignore
+ dest = tempfile.NamedTemporaryFile(mode="w+", delete=False, encoding=encoding)
+ with open(path, encoding=encoding) as source:
+ yield (source, dest) # type: ignore
except BaseException:
- if os.path.isfile(dest.name):
+ if dest and os.path.isfile(dest.name):
os.unlink(dest.name)
raise
else:
|
theskumar/python-dotenv
|
914c68ef0e4c2c085d2753f5cbbf304852f37850
|
diff --git a/tests/test_main.py b/tests/test_main.py
index 82c73ba..84a982f 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -22,6 +22,11 @@ def test_set_key_no_file(tmp_path):
assert os.path.exists(nx_file)
+def test_set_key_invalid_file():
+ with pytest.raises(TypeError):
+ result = dotenv.set_key(None, "foo", "bar")
+
+
@pytest.mark.parametrize(
"before,key,value,expected,after",
[
|
Error Handling in rewrite is incorrect.
Look at:
https://github.com/theskumar/python-dotenv/blob/master/src/dotenv/main.py#L136
If lines 136, 137 or 140 are ever hit, an error "local variable 'dest' referenced before assignment" will be thrown, because `dest` is never assigned when the exception is raised before the `NamedTemporaryFile` call completes.
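A minimal sketch of the pitfall and of the pre-binding fix the patch applies (`open` stands in for `NamedTemporaryFile` here):
```python
def rewrite_buggy(path):
    try:
        dest = open(path, "w")  # if this raises, `dest` is never bound
    except BaseException:
        dest.close()  # UnboundLocalError masks the real exception
        raise

def rewrite_fixed(path):
    dest = None  # pre-bind the name, as the patch does
    try:
        dest = open(path, "w")
    except BaseException:
        if dest is not None:
            dest.close()
        raise
```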
|
0.0
|
914c68ef0e4c2c085d2753f5cbbf304852f37850
|
[
"tests/test_main.py::test_set_key_invalid_file"
] |
[
"tests/test_main.py::test_set_key_no_file",
"tests/test_main.py::test_set_key[-a--expected0-a=''\\n]",
"tests/test_main.py::test_set_key[-a-b-expected1-a='b'\\n]",
"tests/test_main.py::test_set_key[-a-'b'-expected2-a='\\\\'b\\\\''\\n]",
"tests/test_main.py::test_set_key[-a-\"b\"-expected3-a='\"b\"'\\n]",
"tests/test_main.py::test_set_key[-a-b'c-expected4-a='b\\\\'c'\\n]",
"tests/test_main.py::test_set_key[-a-b\"c-expected5-a='b\"c'\\n]",
"tests/test_main.py::test_set_key[a=b-a-c-expected6-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n-a-c-expected7-a='c'\\n]",
"tests/test_main.py::test_set_key[a=b\\n\\n-a-c-expected8-a='c'\\n\\n]",
"tests/test_main.py::test_set_key[a=b\\nc=d-a-e-expected9-a='e'\\nc=d]",
"tests/test_main.py::test_set_key[a=b\\nc=d\\ne=f-c-g-expected10-a=b\\nc='g'\\ne=f]",
"tests/test_main.py::test_set_key[a=b\\n-c-d-expected11-a=b\\nc='d'\\n]",
"tests/test_main.py::test_set_key[a=b-c-d-expected12-a=b\\nc='d'\\n]",
"tests/test_main.py::test_set_key_encoding",
"tests/test_main.py::test_get_key_no_file",
"tests/test_main.py::test_get_key_not_found",
"tests/test_main.py::test_get_key_ok",
"tests/test_main.py::test_get_key_encoding",
"tests/test_main.py::test_get_key_none",
"tests/test_main.py::test_unset_with_value",
"tests/test_main.py::test_unset_no_value",
"tests/test_main.py::test_unset_encoding",
"tests/test_main.py::test_unset_non_existent_file",
"tests/test_main.py::test_find_dotenv_no_file_raise",
"tests/test_main.py::test_find_dotenv_no_file_no_raise",
"tests/test_main.py::test_find_dotenv_found",
"tests/test_main.py::test_load_dotenv_existing_file",
"tests/test_main.py::test_load_dotenv_no_file_verbose",
"tests/test_main.py::test_load_dotenv_existing_variable_no_override",
"tests/test_main.py::test_load_dotenv_existing_variable_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_no_override",
"tests/test_main.py::test_load_dotenv_redefine_var_used_in_file_with_override",
"tests/test_main.py::test_load_dotenv_string_io_utf_8",
"tests/test_main.py::test_load_dotenv_file_stream",
"tests/test_main.py::test_load_dotenv_in_current_dir",
"tests/test_main.py::test_dotenv_values_file",
"tests/test_main.py::test_dotenv_values_string_io[env0-a=$b-False-expected0]",
"tests/test_main.py::test_dotenv_values_string_io[env1-a=$b-True-expected1]",
"tests/test_main.py::test_dotenv_values_string_io[env2-a=${b}-False-expected2]",
"tests/test_main.py::test_dotenv_values_string_io[env3-a=${b}-True-expected3]",
"tests/test_main.py::test_dotenv_values_string_io[env4-a=${b:-d}-False-expected4]",
"tests/test_main.py::test_dotenv_values_string_io[env5-a=${b:-d}-True-expected5]",
"tests/test_main.py::test_dotenv_values_string_io[env6-b=c\\na=${b}-True-expected6]",
"tests/test_main.py::test_dotenv_values_string_io[env7-a=${b}-True-expected7]",
"tests/test_main.py::test_dotenv_values_string_io[env8-a=${b:-d}-True-expected8]",
"tests/test_main.py::test_dotenv_values_string_io[env9-a=\"${b}\"-True-expected9]",
"tests/test_main.py::test_dotenv_values_string_io[env10-a='${b}'-True-expected10]",
"tests/test_main.py::test_dotenv_values_string_io[env11-a=x${b}y-True-expected11]",
"tests/test_main.py::test_dotenv_values_string_io[env12-a=${a}-True-expected12]",
"tests/test_main.py::test_dotenv_values_string_io[env13-a=${a}-True-expected13]",
"tests/test_main.py::test_dotenv_values_string_io[env14-a=${a:-c}-True-expected14]",
"tests/test_main.py::test_dotenv_values_string_io[env15-a=${a:-c}-True-expected15]",
"tests/test_main.py::test_dotenv_values_string_io[env16-a=${b}${b}-True-expected16]",
"tests/test_main.py::test_dotenv_values_string_io[env17-b=d\\na=${b}-True-expected17]",
"tests/test_main.py::test_dotenv_values_string_io[env18-a=b\\na=c\\nd=${a}-True-expected18]",
"tests/test_main.py::test_dotenv_values_string_io[env19-a=b\\nc=${a}\\nd=e\\nc=${d}-True-expected19]",
"tests/test_main.py::test_dotenv_values_file_stream"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-27 07:28:33+00:00
|
bsd-3-clause
| 5,893 |
|
theskumar__python-dotenv-52
|
diff --git a/README.rst b/README.rst
index 936a5a2..8b2a039 100644
--- a/README.rst
+++ b/README.rst
@@ -126,7 +126,8 @@ update your settings on remote server, handy isn't it!
file in current working directory.
-q, --quote [always|never|auto]
Whether to quote or not the variable values.
- Default mode is always.
+ Default mode is always. This does not affect
+ parsing.
--help Show this message and exit.
Commands:
diff --git a/dotenv/cli.py b/dotenv/cli.py
index 9a99314..125a0a8 100644
--- a/dotenv/cli.py
+++ b/dotenv/cli.py
@@ -11,7 +11,7 @@ from .main import get_key, dotenv_values, set_key, unset_key
help="Location of the .env file, defaults to .env file in current working directory.")
@click.option('-q', '--quote', default='always',
type=click.Choice(['always', 'never', 'auto']),
- help="Whether to quote or not the variable values. Default mode is always.")
+ help="Whether to quote or not the variable values. Default mode is always. This does not affect parsing.")
@click.pass_context
def cli(ctx, file, quote):
'''This script is used to set, get or unset values from a .env file.'''
diff --git a/dotenv/main.py b/dotenv/main.py
index 2fe1a83..ceac3fa 100644
--- a/dotenv/main.py
+++ b/dotenv/main.py
@@ -103,7 +103,7 @@ def parse_dotenv(dotenv_path):
k, v = k.strip(), v.strip()
if len(v) > 0:
- quoted = v[0] == v[len(v) - 1] == '"'
+ quoted = v[0] == v[len(v) - 1] in ['"', "'"]
if quoted:
v = decode_escaped(v[1:-1])
|
theskumar/python-dotenv
|
9552db8d8c25753ec4f1a724f64d895b9daa6296
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index d78172b..449b54a 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -46,6 +46,18 @@ def test_key_value_without_quotes():
sh.rm(dotenv_path)
+def test_value_with_quotes():
+ with open(dotenv_path, 'w') as f:
+ f.write('TEST="two words"\n')
+ assert dotenv.get_key(dotenv_path, 'TEST') == 'two words'
+ sh.rm(dotenv_path)
+
+ with open(dotenv_path, 'w') as f:
+ f.write("TEST='two words'\n")
+ assert dotenv.get_key(dotenv_path, 'TEST') == 'two words'
+ sh.rm(dotenv_path)
+
+
def test_unset():
sh.touch(dotenv_path)
success, key_to_set, value_to_set = dotenv.set_key(dotenv_path, 'HELLO', 'WORLD')
@@ -104,6 +116,13 @@ def test_get_key_with_interpolation(cli):
dotenv.set_key(dotenv_path, 'FOO', '${HELLO}')
dotenv.set_key(dotenv_path, 'BAR', 'CONCATENATED_${HELLO}_POSIX_VAR')
+ lines = list(open(dotenv_path, "r").readlines())
+ assert lines == [
+ 'HELLO="WORLD"\n',
+ 'FOO="${HELLO}"\n',
+ 'BAR="CONCATENATED_${HELLO}_POSIX_VAR"\n',
+ ]
+
# test replace from variable in file
stored_value = dotenv.get_key(dotenv_path, 'FOO')
assert stored_value == 'WORLD'
|
Wrong parsing of env variables in single quotes
I have the following `.env` file:
```
DATABASE_URL='postgres://localhost:5432/myapp_development'
```
When I run `dotenv get DATABASE_URL` this is what I get:
`DATABASE_URL="'postgres://localhost:5432/simulator_development'"`
When I try to use this with [dj-database-url](https://github.com/kennethreitz/dj-database-url) it is failing to parse the `DATABASE_URL` environment variable as it is.
It seems using single quotes in the `.env` file is causing this.
It would be nice if this were documented somewhere, if the behavior is intended.
I spent quite a bit of time trying to figure out where the error was.
Thanks 😃
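For context, the patched check `v[0] == v[len(v) - 1] in ['"', "'"]` relies on Python's chained comparisons: both ends must be the same character *and* that character must be a quote. A sketch (the `len` guard is an added safeguard for empty/one-char values, not part of the patch):
```python
# Chained comparison: a == b in c  <=>  (a == b) and (b in c).
def is_quoted(v: str) -> bool:
    return len(v) > 1 and v[0] == v[-1] in ['"', "'"]

assert is_quoted('"two words"')
assert is_quoted("'two words'")
assert not is_quoted("two words")
```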
|
0.0
|
9552db8d8c25753ec4f1a724f64d895b9daa6296
|
[
"tests/test_cli.py::test_value_with_quotes"
] |
[
"tests/test_cli.py::test_get_key",
"tests/test_cli.py::test_list",
"tests/test_cli.py::test_key_value_without_quotes",
"tests/test_cli.py::test_unset",
"tests/test_cli.py::test_console_script",
"tests/test_cli.py::test_get_key_with_interpolation",
"tests/test_cli.py::test_get_key_with_interpolation_of_unset_variable"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-30 09:50:38+00:00
|
bsd-3-clause
| 5,894 |
|
thinkingmachines__geomancer-48
|
diff --git a/geomancer/spells/__init__.py b/geomancer/spells/__init__.py
index 92e6755..501b5d4 100644
--- a/geomancer/spells/__init__.py
+++ b/geomancer/spells/__init__.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from .distance_to_nearest import DistanceToNearest
+from .length_of import LengthOf
from .number_of import NumberOf
-
-__all__ = ["DistanceToNearest", "NumberOf"]
+__all__ = ["DistanceToNearest", "NumberOf", "LengthOf"]
diff --git a/geomancer/spells/length_of.py b/geomancer/spells/length_of.py
new file mode 100644
index 0000000..2bb308f
--- /dev/null
+++ b/geomancer/spells/length_of.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+
+# Import modules
+from sqlalchemy import func
+from sqlalchemy.sql import select
+
+from .base import Spell
+from ..backend.cores.bq import BigQueryCore
+
+from loguru import logger
+
+
+class LengthOf(Spell):
+ """Obtain the length of all Lines-of-Interest within a certain radius"""
+
+ def __init__(self, on, within=10 * 1000, **kwargs):
+ """Spell constructor
+
+ Parameters
+ ----------
+ on : str
+ Feature class to compare upon
+ within : float, optional
+ Look for values within a particular range. Its value is in meters,
+ the default is :code:`10,000` meters.
+ source_table : str
+ Table URI to run queries against.
+ feature_name : str
+ Column name for the output feature.
+ column : str, optional
+ Column to look the geometries from. The default is :code:`WKT`
+ options : geomancer.Config
+ Specify configuration for interacting with the database backend.
+ Default is a BigQuery Configuration
+ """
+ super(LengthOf, self).__init__(**kwargs)
+ logger.warning(
+ "ST_Buffer is not yet implemented so BigQueryCore won't work: groups.google.com/d/msg/bq-gis-feedback/Yq4Ku6u2A80/ceVXU01RCgAJ"
+ )
+ self.source_column, self.source_filter = self.extract_columns(on)
+ self.within = within
+
+ def query(self, source, target, core, column):
+ # ST_Buffer is not yet implemented so BigQueryCore won't work
+ # (groups.google.com/d/msg/bq-gis-feedback/Yq4Ku6u2A80/ceVXU01RCgAJ)
+ if isinstance(core, BigQueryCore):
+ raise ValueError(
+ "The LengthOf feature is currently incompatible with \
+ BigQueryCore because ST_Buffer is not yet implemented"
+ )
+
+ # Get all lines-of-interests (LOIs) of fclass `on`
+ lois = select(
+ [source.c[self.source_id], source.c.WKT],
+ source.c[self.source_column] == self.source_filter,
+ ).cte("lois")
+
+ # Create a buffer `within` a distance/radius around each centroid.
+ # The point has to be converted to EPSG:3857 so that meters can be
+ # used instead of decimal degrees for EPSG:4326.
+ buff = select(
+ [
+ target,
+ func.ST_Buffer(
+ core.ST_GeoFromText(target.c[column]), self.within
+ ).label("__buffer__"),
+ ]
+ ).cte("buff")
+
+ # Clip the LOIs with the buffers then calculate the length of all
+ # LOIs inside each buffer.
+ clip = select(
+ [
+ buff,
+ func.ST_Intersection(
+ core.ST_GeoFromText(lois.c.WKT),
+ func.ST_Transform(buff.c["__buffer__"], 4326),
+ ).label("__geom__"),
+ func.ST_Length(
+ func.ST_Intersection(
+ func.ST_Transform(
+ core.ST_GeoFromText(lois.c.WKT), 3857
+ ),
+ buff.c["__buffer__"],
+ )
+ ).label("__len__"),
+ ],
+ func.ST_Intersects(
+ core.ST_GeoFromText(lois.c.WKT),
+ func.ST_Transform(buff.c["__buffer__"], 4326),
+ ),
+ ).cte("clip")
+
+ # Sum the length of all LOIs inside each buffer
+ sum_length = (
+ select(
+ [
+ clip.c["__index_level_0__"],
+ func.sum(clip.c["__len__"]).label(self.feature_name),
+ ]
+ )
+ .select_from(clip)
+ .group_by(clip.c["__index_level_0__"])
+ .cte("sum_length")
+ )
+
+ # Join the sum of the length of all LOIs inside each buffer
+ query = select(
+ [
+ col
+ for col in sum_length.columns
+ if col.key not in ("__len__", "__geom__", "__buffer__")
+ ],
+ sum_length.c["__index_level_0__"] == buff.c["__index_level_0__"],
+ )
+ return query
|
thinkingmachines/geomancer
|
fbc074eaa9d3e8e7d439da79bcb6fbfd6d0f8ae4
|
diff --git a/tests/spells/test_length_of.py b/tests/spells/test_length_of.py
new file mode 100644
index 0000000..4fd5d72
--- /dev/null
+++ b/tests/spells/test_length_of.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+# Import modules
+import pytest
+from google.cloud import bigquery
+from tests.spells.base_test_spell import BaseTestSpell, SpellDB
+
+# Import from package
+from geomancer.backend.settings import SQLiteConfig
+from geomancer.spells import LengthOf
+
+params = [
+ SpellDB(
+ spell=LengthOf(
+ on="residential",
+ within=50,
+ source_table="gis_osm_roads_free_1",
+ feature_name="len_residential",
+ options=SQLiteConfig(),
+ ),
+ dburl="sqlite:///tests/data/source.sqlite",
+ )
+]
+
+
[email protected]
+class TestLengthOf(BaseTestSpell):
+ @pytest.fixture(params=params, ids=["roads-sqlite"])
+ def spelldb(self, request):
+ return request.param
|
Add LengthOf function
The `LengthOf` function would compute the length of all line features inside a certain circular `radius` centered at a location with a certain `lat` and `lon`.
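A usage sketch mirroring the accompanying test (`tests/spells/test_length_of.py`), run against the repository's SQLite fixture:
```python
from geomancer.backend.settings import SQLiteConfig
from geomancer.spells import LengthOf

# Total length of residential roads within 50 m of each input point.
spell = LengthOf(
    on="residential",
    within=50,  # metres
    source_table="gis_osm_roads_free_1",
    feature_name="len_residential",
    options=SQLiteConfig(),
)
# df = spell.cast(points_df, dburl="sqlite:///tests/data/source.sqlite")
```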
|
0.0
|
fbc074eaa9d3e8e7d439da79bcb6fbfd6d0f8ae4
|
[
"tests/spells/test_length_of.py::TestLengthOf::test_extract_columns_return_values[roads-sqlite-fclass:embassy]",
"tests/spells/test_length_of.py::TestLengthOf::test_extract_columns_return_values[roads-sqlite-embassy]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-03-22 12:37:57+00:00
|
mit
| 5,895 |
|
thinkingmachines__geomancer-49
|
diff --git a/geomancer/backend/settings.py b/geomancer/backend/settings.py
index c23a36e..6a43bad 100644
--- a/geomancer/backend/settings.py
+++ b/geomancer/backend/settings.py
@@ -45,7 +45,7 @@ class BQConfig(Config):
@property
def name(self):
- return "bq"
+ return "bigquery"
DATASET_ID = "geomancer"
EXPIRY = 3
diff --git a/geomancer/spellbook/__init__.py b/geomancer/spellbook/__init__.py
new file mode 100644
index 0000000..b27808e
--- /dev/null
+++ b/geomancer/spellbook/__init__.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+
+"""A :code:`SpellBook` is a collection of spells that can be sequentially casted and
+merged in a single dataframe
+
+ >>> from geomancer.spells import DistanceOf, NumberOf
+ >>> from geomancer.spellbook import SpellBook
+ >>> spellbook = SpellBook(
+ spells=[
+ DistanceOf(...),
+ NumberOf(...),
+ ],
+ )
+ >>> df = ...
+ >>> df_with_features = spellbook.cast(df)
+
+:code:`SpellBook`s can be distributed by exporting them to JSON files.
+
+ >>> spellbook.author = "My Name"
+ >>> spellbook.description = "My Features"
+ >>> spellbook.to_json("my_features.json")
+
+Now other people can easily reuse your feature extractions with their own datasets!
+
+ >>> spellbook = SpellBook.read_json("my_features.json")
+ >>> my_df = ...
+ >>> my_df_with_features = spellbook.cast(my_df)
+"""
+
+# Import standard library
+import importlib
+import json
+
+# Import modules
+import pandas as pd
+
+
+class SpellBook(object):
+ def __init__(self, spells, column="WKT", author=None, description=None):
+ """SpellBook constructor
+
+ Parameters
+ ----------
+ spells : list of :class:`geomancer.spells.Spell`
+ List of spell instances.
+ column : str, optional
+ Column to look the geometries from. The default is :code:`WKT`
+ author : str, optional
+ Author of the spell book
+ description : str, optional
+ Description of the spell book
+ """
+ self.column = column
+ self.spells = spells
+ self.author = author
+ self.description = description
+
+ def cast(self, df):
+ """Runs the cast method of each spell in the spell book
+
+ Parameters
+ ----------
+ df : pandas.DataFrame
+ Dataframe containing the points to compare upon. By default, we
+ will look into the :code:`geometry` column. You can specify your
+ own column by passing an argument to the :code:`column` parameter.
+
+ Returns
+ -------
+ pandas.DataFrame
+ Output dataframe with the features from all spells
+ """
+ for spell in self.spells:
+ df = df.join(
+ spell.cast(
+ df, column=self.column, features_only=True
+ ).set_index("__index_level_0__")
+ )
+ return df
+
+ def to_json(self, filename=None, **kwargs):
+ """Exports spell book as a JSON string
+
+ Parameters
+ ----------
+ filename : str, optional
+ Output filename. If none is given, output is returned
+
+ Returns
+ -------
+ str or None
+ Export of spell book in JSON format
+ """
+ obj = {
+ **self.__dict__,
+ "spells": [
+ {
+ **s.__dict__,
+ "module": type(s).__module__,
+ "type": type(s).__name__,
+ }
+ for s in self.spells
+ ],
+ }
+ if filename:
+ with open(filename, "w") as f:
+ json.dump(obj, f, **kwargs)
+ else:
+ return json.dumps(obj, **kwargs)
+
+ @classmethod
+ def _instantiate_spells(cls, spells):
+ for spell in spells:
+ mod = importlib.import_module(spell.pop("module"))
+ spell_cls = getattr(mod, spell.pop("type"))
+ on = "{}:{}".format(
+ spell.pop("source_column"), spell.pop("source_filter")
+ )
+ yield spell_cls(on, **spell)
+
+ @classmethod
+ def read_json(cls, filename):
+ """Reads a JSON exported spell book
+
+ Parameters
+ ----------
+ filename : str
+ Filename of JSON file to read.
+
+ Returns
+ -------
+ :class:`geomancer.spellbook.SpellBook`
+ :code:`SpellBook` instance parsed from given JSON file.
+ """
+ with open(filename) as f:
+ obj = json.load(f)
+ obj["spells"] = cls._instantiate_spells(obj.pop("spells"))
+ return cls(**obj)
diff --git a/geomancer/spells/base.py b/geomancer/spells/base.py
index 34cd0e3..c82854c 100644
--- a/geomancer/spells/base.py
+++ b/geomancer/spells/base.py
@@ -37,8 +37,8 @@ class Spell(abc.ABC):
self,
source_table,
feature_name,
- column="WKT",
source_id="osm_id",
+ dburl=None,
options=None,
):
"""Spell constructor
@@ -49,17 +49,17 @@ class Spell(abc.ABC):
Table URI to run queries against.
feature_name : str
Column name for the output feature.
- column : str, optional
- Column to look the geometries from. The default is :code:`WKT`
+ dburl : str, optional
+ Database url used to configure backend connection
options : geomancer.Config, optional
Specify configuration for interacting with the database backend.
Auto-detected if not set.
"""
self.source_table = source_table
self.feature_name = feature_name
- self.options = options
- self.column = column
self.source_id = source_id
+ self.dburl = dburl
+ self.options = options
def extract_columns(self, x):
"""Spell constructor
@@ -120,8 +120,22 @@ class Spell(abc.ABC):
"""
raise NotImplementedError
- @logger.catch
- def cast(self, df, dburl):
+ def _include_column(self, col, keep_index, features_only):
+ if features_only:
+ return col.key in ("__index_level_0__", self.feature_name)
+ if keep_index:
+ return True
+ return col.key != "__index_level_0__"
+
+ @logger.catch(reraise=True)
+ def cast(
+ self,
+ df,
+ dburl=None,
+ column="WKT",
+ keep_index=False,
+ features_only=False,
+ ):
"""Apply the feature transform to an input pandas.DataFrame
If using BigQuery, a :code:`google.cloud.client.Client`
@@ -133,14 +147,28 @@ class Spell(abc.ABC):
Dataframe containing the points to compare upon. By default, we
will look into the :code:`geometry` column. You can specify your
own column by passing an argument to the :code:`column` parameter.
- dburl : str
+ dburl : str, optional
Database url used to configure backend connection
+ column : str, optional
+ Column to look the geometries from. The default is :code:`WKT`
+ keep_index : boolean, optional
+ Include index in output dataframe
+ features_only : boolean, optional
+ Only return features as output dataframe. Automatically sets
+ :code:`keep_index` to :code:`True`.
Returns
-------
pandas.DataFrame
Output dataframe with the features per given point
"""
+ dburl = dburl or self.dburl
+ if not dburl:
+ raise ValueError("dburl was not supplied")
+
+ if features_only:
+ keep_index = True
+
core = self.get_core(dburl)
# Get engine
@@ -152,11 +180,15 @@ class Spell(abc.ABC):
)
# Build query
- query = self.query(source, target, core)
+ query = self.query(source, target, core, column)
- # Remove temporary index column
+ # Filter output columns
query = select(
- [col for col in query.columns if col.key != "__index_level_0__"]
+ [
+ col
+ for col in query.columns
+ if self._include_column(col, keep_index, features_only)
+ ]
).select_from(query)
# Perform query
diff --git a/geomancer/spells/distance_to_nearest.py b/geomancer/spells/distance_to_nearest.py
index 707310e..df53544 100644
--- a/geomancer/spells/distance_to_nearest.py
+++ b/geomancer/spells/distance_to_nearest.py
@@ -26,15 +26,15 @@ class DistanceToNearest(Spell):
Column name for the output feature.
column : str, optional
Column to look the geometries from. The default is :code:`WKT`
- options : geomancer.Config
+ options : geomancer.Config, optional
Specify configuration for interacting with the database backend.
- Default is a BigQuery Configuration
+ Auto-detected if not set.
"""
super(DistanceToNearest, self).__init__(**kwargs)
self.source_column, self.source_filter = self.extract_columns(on)
self.within = within
- def query(self, source, target, core):
+ def query(self, source, target, core, column):
# Get all POIs of fclass `on`
pois = select(
[source.c[self.source_id], source.c.WKT],
@@ -42,7 +42,7 @@ class DistanceToNearest(Spell):
).cte("pois")
# Compute the distance from `column` to each POI within given distance
distance = func.ST_Distance(
- core.ST_GeoFromText(target.c[self.column]),
+ core.ST_GeoFromText(target.c[column]),
core.ST_GeoFromText(pois.c.WKT),
)
pairs = (
diff --git a/geomancer/spells/number_of.py b/geomancer/spells/number_of.py
index 3865177..b42e57b 100644
--- a/geomancer/spells/number_of.py
+++ b/geomancer/spells/number_of.py
@@ -26,15 +26,15 @@ class NumberOf(Spell):
Column name for the output feature.
column : str, optional
Column to look the geometries from. The default is :code:`WKT`
- options : geomancer.Config
+ options : geomancer.Config, optional
Specify configuration for interacting with the database backend.
- Default is a BigQuery Configuration
+ Auto-detected if not set.
"""
super(NumberOf, self).__init__(**kwargs)
self.source_column, self.source_filter = self.extract_columns(on)
self.within = within
- def query(self, source, target, core):
+ def query(self, source, target, core, column):
# Get all POIs of fclass `on`
pois = select(
[source.c[self.source_id], source.c.WKT],
@@ -42,7 +42,7 @@ class NumberOf(Spell):
).cte("pois")
# Compute the distance from `column` to each POI within given distance
distance = func.ST_Distance(
- core.ST_GeoFromText(target.c[self.column]),
+ core.ST_GeoFromText(target.c[column]),
core.ST_GeoFromText(pois.c.WKT),
)
pairs = (
|
thinkingmachines/geomancer
|
1e836d8cddb8c8c1f958775cb27a4a1e9e06523d
|
diff --git a/tests/conftest.py b/tests/conftest.py
index 5953f3f..65193fd 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,3 +10,9 @@ def sample_points():
"""Return a set of POINTS in a pandas.DataFrame"""
df = pd.read_csv("tests/data/sample_points.csv")
return df
+
+
[email protected]
+def spellbook_json():
+ with open("tests/data/spellbook.json") as f:
+ return f.read()
diff --git a/tests/data/spellbook.json b/tests/data/spellbook.json
new file mode 100644
index 0000000..80f4339
--- /dev/null
+++ b/tests/data/spellbook.json
@@ -0,0 +1,1 @@
+{"column": "WKT", "spells": [{"source_table": "gis_osm_pois_free_1", "feature_name": "dist_supermarket", "source_id": "osm_id", "dburl": "sqlite:///tests/data/source.sqlite", "options": null, "source_column": "fclass", "source_filter": "supermarket", "within": 10000, "module": "geomancer.spells.distance_to_nearest", "type": "DistanceToNearest"}, {"source_table": "gis_osm_pois_free_1", "feature_name": "num_embassy", "source_id": "osm_id", "dburl": "sqlite:///tests/data/source.sqlite", "options": null, "source_column": "fclass", "source_filter": "embassy", "within": 10000, "module": "geomancer.spells.number_of", "type": "NumberOf"}], "author": null, "description": null}
\ No newline at end of file
diff --git a/tests/spellbook/test_spellbook.py b/tests/spellbook/test_spellbook.py
new file mode 100644
index 0000000..ca65cea
--- /dev/null
+++ b/tests/spellbook/test_spellbook.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+
+# Import modules
+import pandas as pd
+import pytest
+
+# Import from package
+from geomancer.spellbook import SpellBook
+from geomancer.spells import DistanceToNearest, NumberOf
+
+
[email protected]("sample_points")
+def test_spell_dburl(sample_points):
+ with pytest.raises(ValueError, match="dburl was not supplied"):
+ spell = DistanceToNearest(
+ on="embassy",
+ source_table="gis_osm_pois_free_1",
+ feature_name="dist_embassy",
+ )
+ spell.cast(sample_points)
+
+
[email protected]("sample_points")
+def test_spell_keep_index(sample_points):
+ spell = DistanceToNearest(
+ on="embassy",
+ source_table="gis_osm_pois_free_1",
+ feature_name="dist_embassy",
+ )
+ df = spell.cast(
+ sample_points,
+ dburl="sqlite:///tests/data/source.sqlite",
+ keep_index=True,
+ )
+ assert "__index_level_0__" in df.columns
+ df = spell.cast(
+ sample_points,
+ dburl="sqlite:///tests/data/source.sqlite",
+ keep_index=False,
+ )
+ assert "__index_level_0__" not in df.columns
+
+
[email protected]("sample_points")
+def test_spell_features_only(sample_points):
+ spell = DistanceToNearest(
+ on="embassy",
+ source_table="gis_osm_pois_free_1",
+ feature_name="dist_embassy",
+ )
+ df = spell.cast(
+ sample_points,
+ dburl="sqlite:///tests/data/source.sqlite",
+ features_only=True,
+ )
+ assert ["__index_level_0__", "dist_embassy"] == df.columns.tolist()
+
+
[email protected]
+def spellbook():
+ return SpellBook(
+ [
+ DistanceToNearest(
+ "supermarket",
+ source_table="gis_osm_pois_free_1",
+ feature_name="dist_supermarket",
+ dburl="sqlite:///tests/data/source.sqlite",
+ ),
+ NumberOf(
+ on="embassy",
+ source_table="gis_osm_pois_free_1",
+ feature_name="num_embassy",
+ dburl="sqlite:///tests/data/source.sqlite",
+ ),
+ ]
+ )
+
+
[email protected]("spellbook", "sample_points")
+def test_spellbook_spells(spellbook, sample_points):
+ df = spellbook.cast(sample_points)
+ assert "dist_supermarket" in df.columns
+ assert "num_embassy" in df.columns
+
+
[email protected]("spellbook", "spellbook_json")
+def test_spellbook_to_json(spellbook, spellbook_json):
+ assert spellbook.to_json() == spellbook_json
+
+
[email protected]("spellbook", "spellbook_json")
+def test_spellbook_to_json_file(spellbook, spellbook_json, tmpdir):
+ filename = "spellbook.json"
+ f = tmpdir.mkdir(__name__).join(filename)
+ spellbook.to_json(f.strpath)
+    assert f.read() == spellbook_json
+
+
[email protected]("spellbook", "spellbook_json")
+def test_spellbook_read_json(spellbook, spellbook_json, tmpdir):
+ filename = "spellbook.json"
+ f = tmpdir.mkdir(__name__).join(filename)
+ f.write(spellbook_json)
+ _spellbook = SpellBook.read_json(f.strpath)
+ assert _spellbook.column == spellbook.column
+ assert _spellbook.author == spellbook.author
+ assert _spellbook.description == spellbook.description
+ for i, spell in enumerate(_spellbook.spells):
+ assert spell.__dict__ == spellbook.spells[i].__dict__
diff --git a/tests/spells/base_test_spell.py b/tests/spells/base_test_spell.py
index c8d0e3c..ec67e14 100644
--- a/tests/spells/base_test_spell.py
+++ b/tests/spells/base_test_spell.py
@@ -38,7 +38,9 @@ class BaseTestSpell:
engine=engine,
)
# Perform the test
- query = spelldb.spell.query(source=source, target=target, core=core)
+ query = spelldb.spell.query(
+ source=source, target=target, core=core, column="WKT"
+ )
assert isinstance(query, ClauseElement)
@pytest.mark.usefixtures("spelldb", "sample_points")
diff --git a/tests/spells/test_distance_to_nearest.py b/tests/spells/test_distance_to_nearest.py
index 43106e7..1bde1ed 100644
--- a/tests/spells/test_distance_to_nearest.py
+++ b/tests/spells/test_distance_to_nearest.py
@@ -16,13 +16,16 @@ params = [
),
dburl="sqlite:///tests/data/source.sqlite",
),
- SpellDB(
- spell=DistanceToNearest(
- on="primary",
- source_table="gis_osm_roads_free_1",
- feature_name="dist_primary",
+ pytest.param(
+ SpellDB(
+ spell=DistanceToNearest(
+ on="primary",
+ source_table="gis_osm_roads_free_1",
+ feature_name="dist_primary",
+ ),
+ dburl="sqlite:///tests/data/source.sqlite",
),
- dburl="sqlite:///tests/data/source.sqlite",
+ marks=pytest.mark.slow,
),
pytest.param(
SpellDB(
diff --git a/tests/spells/test_number_of.py b/tests/spells/test_number_of.py
index 9680f5e..2371e37 100644
--- a/tests/spells/test_number_of.py
+++ b/tests/spells/test_number_of.py
@@ -17,13 +17,16 @@ params = [
),
dburl="sqlite:///tests/data/source.sqlite",
),
- SpellDB(
- spell=NumberOf(
- on="primary",
- source_table="gis_osm_roads_free_1",
- feature_name="num_primary",
+ pytest.param(
+ SpellDB(
+ spell=NumberOf(
+ on="primary",
+ source_table="gis_osm_roads_free_1",
+ feature_name="num_primary",
+ ),
+ dburl="sqlite:///tests/data/source.sqlite",
),
- dburl="sqlite:///tests/data/source.sqlite",
+ marks=pytest.mark.slow,
),
pytest.param(
SpellDB(
|
Add SpellBook
Usage ideas:
```python
from geomancer import SpellBook
# When you want to register spells
my_spellbook = SpellBook([
DistanceToNearest("embassy", within=10000, source_table="tm-geospatial.ph_osm.pois"), # From BQ
DistanceToNearest("hospital", within=5000, source_table="pois"), # From Spatialite
])
# You can then do multiple casts
my_features = my_spellbook.cast(df, host=[bigquery.Client(), "tests/data/source.sqlite"])
# Saving the Spellbook
my_spellbook.author = "Lj Miranda" # optional
my_spellbook.description = "Some cool features for other stuff" # optional
my_spellbook.to_json("path/to/my/own/features.json")
```
Some potential challenges:
- It is possible to create a spellbook with spells coming from different warehouses (one feature from BQ, another from SQLite, etc.). However, setting the `source_table` and the `host` is decoupled (one during init, another during `cast()`).
- Concatenating everything into a single dataframe can clash (e.g., duplicate output column names). We should do some validation before the concat (see the sketch after this list).
Some preliminary tasks:
- Write down all possible metadata to include: things that are automatically generated (date? unique ID? etc.) and those that are manually set.
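A minimal sketch of the pre-concat validation idea, assuming hypothetical names (`validate_feature_names` is not part of geomancer; only pandas is real here):
```python
# Hypothetical pre-concat check: every name here is an assumption for
# illustration, not the actual geomancer implementation.
import pandas as pd

def validate_feature_names(dfs):
    """Raise if two feature dataframes share an output column name."""
    seen = set()
    for df in dfs:
        dupes = seen.intersection(df.columns)
        if dupes:
            raise ValueError(f"Duplicate feature columns: {sorted(dupes)}")
        seen.update(df.columns)

features = [
    pd.DataFrame({"dist_embassy": [1.0]}),
    pd.DataFrame({"dist_supermarket": [2.0]}),
]
validate_feature_names(features)        # passes: no name clashes
combined = pd.concat(features, axis=1)  # safe to concatenate column-wise
```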
|
0.0
|
1e836d8cddb8c8c1f958775cb27a4a1e9e06523d
|
[
"tests/spellbook/test_spellbook.py::test_spell_dburl",
"tests/spellbook/test_spellbook.py::test_spellbook_to_json",
"tests/spellbook/test_spellbook.py::test_spellbook_to_json_file",
"tests/spellbook/test_spellbook.py::test_spellbook_read_json",
"tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[pois-sqlite-fclass:embassy]",
"tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[pois-sqlite-embassy]",
"tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[roads-sqlite-fclass:embassy]",
"tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[roads-sqlite-embassy]",
"tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[pois-bq-fclass:embassy]",
"tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[pois-bq-embassy]",
"tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[roads-bq-fclass:embassy]",
"tests/spells/test_distance_to_nearest.py::TestDistanceToNearest::test_extract_columns_return_values[roads-bq-embassy]",
"tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[pois-sqlite-fclass:embassy]",
"tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[pois-sqlite-embassy]",
"tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[roads-sqlite-fclass:embassy]",
"tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[roads-sqlite-embassy]",
"tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[pois-bq-fclass:embassy]",
"tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[pois-bq-embassy]",
"tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[roads-bq-fclass:embassy]",
"tests/spells/test_number_of.py::TestNumberOf::test_extract_columns_return_values[roads-bq-embassy]"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-03-22 12:41:10+00:00
|
mit
| 5,896 |
|
thp__urlwatch-543
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 12ad2d1..ec27cfb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -26,6 +26,8 @@ The format mostly follows [Keep a Changelog](http://keepachangelog.com/en/1.0.0/
- Unit tests have been migrated from `nose` to `pytest`
and moved from `test/` to `lib/urlwatch/tests/`
- The ``css`` and ``xpath`` filters now accept ``skip`` and ``maxitems`` as subfilter
+- The ``shellpipe`` filter now inherits all environment variables (e.g. ``$PATH``)
+ of the ``urlwatch`` process
### Fixed
diff --git a/lib/urlwatch/filters.py b/lib/urlwatch/filters.py
index 390ac09..b11a3bb 100644
--- a/lib/urlwatch/filters.py
+++ b/lib/urlwatch/filters.py
@@ -788,10 +788,12 @@ class ShellPipeFilter(FilterBase):
encoding = sys.getdefaultencoding()
- env = {
+ # Work on a copy to not modify the outside environment
+ env = dict(os.environ)
+ env.update({
'URLWATCH_JOB_NAME': self.job.pretty_name() if self.job else '',
'URLWATCH_JOB_LOCATION': self.job.get_location() if self.job else '',
- }
+ })
return subprocess.check_output(subfilter['command'], shell=True,
input=data.encode(encoding), env=env).decode(encoding)
|
thp/urlwatch
|
65507c55ff5f467687d4bef4ca4d99db55dce24a
|
diff --git a/lib/urlwatch/tests/test_filters.py b/lib/urlwatch/tests/test_filters.py
index 80465f3..ca85120 100644
--- a/lib/urlwatch/tests/test_filters.py
+++ b/lib/urlwatch/tests/test_filters.py
@@ -71,3 +71,20 @@ def test_providing_subfilter_to_filter_without_subfilter_raises_valueerror():
def test_providing_unknown_subfilter_raises_valueerror():
with pytest.raises(ValueError):
list(FilterBase.normalize_filter_list([{'grep': {'re': 'Price: .*', 'anothersubfilter': '42'}}]))
+
+
+def test_shellpipe_inherits_environment_but_does_not_modify_it():
+ # https://github.com/thp/urlwatch/issues/541
+
+ # Set a specific value to check it doesn't overwrite the current env
+ os.environ['URLWATCH_JOB_NAME'] = 'should-not-be-overwritten'
+
+ # See if the shellpipe process can use a variable from the outside
+ os.environ['INHERITED_FROM'] = 'parent-process'
+ filtercls = FilterBase.__subclasses__.get('shellpipe')
+ result = filtercls(None, None).filter('input-string', {'command': 'echo "$INHERITED_FROM/$URLWATCH_JOB_NAME"'})
+ # Check that the inherited value and the job name is set properly
+ assert result == 'parent-process/\n'
+
+ # Check that outside the variable wasn't overwritten by the filter
+ assert os.environ['URLWATCH_JOB_NAME'] == 'should-not-be-overwritten'
|
The shellpipe filter is removing all environment variables
The shellpipe filter is removing all environment variables, then adding
URLWATCH_JOB_NAME and URLWATCH_JOB_LOCATION. I believe
the code in filters.py should just be adding them to the existing environment.
As a result (simple example shown below), the "sh" that's forked can run
programs in system directories only because of fallback path-search behavior.
Under Debian, where /bin/sh is a symlink to /bin/dash, an strace shows that
"sh" searches /usr/local/{s,}bin /usr/{s,}bin} and /{s,}bin.
urlwatch fails to find the shellpipe command when it's in ~/bin and isn't
specified with its pathname.
Specifying a "diff_tool" does find the command if it's in my ~/bin
and doesn't empty the environment.
I'm using v2.19 as installed by "pip3" (not the latest code on Github,
though the env. handling appears to be the same).
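A minimal standalone sketch of the difference (plain `subprocess`, not the urlwatch code itself; Unix-only illustration):
```python
import os
import subprocess

job_vars = {'URLWATCH_JOB_NAME': 'demo', 'URLWATCH_JOB_LOCATION': 'date'}

# Replacing the environment: only the two job variables exist, so $PATH is
# unset and the forked sh falls back to its built-in search directories.
subprocess.run('env', shell=True, env=job_vars)

# Inheriting the environment: copy os.environ first, then add the job
# variables, so $PATH (including ~/bin) is preserved.
env = dict(os.environ)
env.update(job_vars)
subprocess.run('env', shell=True, env=env)
```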
% env | grep PATH
PATH=/home/stosh/bin:/usr/local/bin:/usr/bin:/bin
% cat date_cat.yaml
name: watchdog-cat
command: "date"
filter:
- shellpipe: "env >/dev/tty; cat"
% cat date_mycat.yaml
name: watchdog-mycat
command: "date"
filter:
- shellpipe: "env >/dev/tty; mycat"
% cmp /bin/cat ~/bin/mycat
% urlwatch --cache date.db --config ~/.config/urlwatch/urlwatch_orig.yaml --urls date_cat.yaml
URLWATCH_JOB_NAME=watchdog-cat
URLWATCH_JOB_LOCATION=date
PWD=/home/stosh
===========================================================================
01. CHANGED: watchdog-cat
===========================================================================
---------------------------------------------------------------------------
CHANGED: watchdog-cat ( date )
---------------------------------------------------------------------------
--- @ Tue, 28 Jul 2020 17:55:41 -0400
+++ @ Tue, 28 Jul 2020 17:56:27 -0400
@@ -1 +1 @@
-Tue Jul 28 17:55:41 EDT 2020
+Tue Jul 28 17:56:27 EDT 2020
---------------------------------------------------------------------------
--
urlwatch 2.19, Copyright 2008-2020 Thomas Perl
Website: https://thp.io/2008/urlwatch/
watched 1 URLs in 0 seconds
% urlwatch --cache date.db --config ~/.config/urlwatch/urlwatch_orig.yaml --urls date_mycat.yaml
URLWATCH_JOB_NAME=watchdog-mycat
URLWATCH_JOB_LOCATION=date
PWD=/home/stosh
/bin/sh: 1: mycat: not found
===========================================================================
01. ERROR: watchdog-mycat
===========================================================================
---------------------------------------------------------------------------
ERROR: watchdog-mycat ( date )
---------------------------------------------------------------------------
Traceback (most recent call last):
File "/home/stosh/.local/lib/python3.7/site-packages/urlwatch/handler.py", line 92, in process
data = FilterBase.process(filter_kind, subfilter, self, data)
File "/home/stosh/.local/lib/python3.7/site-packages/urlwatch/filters.py", line 145, in process
return filtercls(state.job, state).filter(data, subfilter)
File "/home/stosh/.local/lib/python3.7/site-packages/urlwatch/filters.py", line 784, in filter
input=data.encode(encoding), env=env).decode(encoding)
File "/usr/lib/python3.7/subprocess.py", line 395, in check_output
**kwargs).stdout
File "/usr/lib/python3.7/subprocess.py", line 487, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command 'env >/dev/tty; mycat' returned non-zero exit status 127.
---------------------------------------------------------------------------
--
urlwatch 2.19, Copyright 2008-2020 Thomas Perl
Website: https://thp.io/2008/urlwatch/
watched 1 URLs in 0 seconds
|
0.0
|
65507c55ff5f467687d4bef4ca4d99db55dce24a
|
[
"lib/urlwatch/tests/test_filters.py::test_shellpipe_inherits_environment_but_does_not_modify_it"
] |
[
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[grep-output0]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[grep:foo-output1]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[beautify,grep:foo,html2text-output2]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[re.sub:.*-output3]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[re.sub-output4]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input5-output5]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input6-output6]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input7-output7]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input8-output8]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input9-output9]",
"lib/urlwatch/tests/test_filters.py::test_normalize_filter_list[input10-output10]",
"lib/urlwatch/tests/test_filters.py::test_filters[element_by_tag-test_data0]",
"lib/urlwatch/tests/test_filters.py::test_filters[element_by_tag_nested-test_data1]",
"lib/urlwatch/tests/test_filters.py::test_filters[element_by_id-test_data2]",
"lib/urlwatch/tests/test_filters.py::test_filters[element_by_class-test_data3]",
"lib/urlwatch/tests/test_filters.py::test_filters[xpath_elements-test_data4]",
"lib/urlwatch/tests/test_filters.py::test_filters[xpath_text-test_data5]",
"lib/urlwatch/tests/test_filters.py::test_filters[xpath_exclude-test_data6]",
"lib/urlwatch/tests/test_filters.py::test_filters[xpath_xml_namespaces-test_data7]",
"lib/urlwatch/tests/test_filters.py::test_filters[grep-test_data11]",
"lib/urlwatch/tests/test_filters.py::test_filters[grep_with_comma-test_data12]",
"lib/urlwatch/tests/test_filters.py::test_filters[json_format-test_data13]",
"lib/urlwatch/tests/test_filters.py::test_filters[json_format_subfilter-test_data14]",
"lib/urlwatch/tests/test_filters.py::test_filters[sha1-test_data15]",
"lib/urlwatch/tests/test_filters.py::test_filters[hexdump-test_data16]",
"lib/urlwatch/tests/test_filters.py::test_filters[sort-test_data17]",
"lib/urlwatch/tests/test_filters.py::test_filters[sort_paragraphs-test_data18]",
"lib/urlwatch/tests/test_filters.py::test_filters[sort_separator_reverse-test_data19]",
"lib/urlwatch/tests/test_filters.py::test_filters[sort_reverse-test_data20]",
"lib/urlwatch/tests/test_filters.py::test_filters[reverse_lines-test_data21]",
"lib/urlwatch/tests/test_filters.py::test_filters[reverse_separator_dict-test_data22]",
"lib/urlwatch/tests/test_filters.py::test_filters[reverse_separator_str-test_data23]",
"lib/urlwatch/tests/test_filters.py::test_filters[reverse_separator_paragraph-test_data24]",
"lib/urlwatch/tests/test_filters.py::test_filters[re_sub_multiline-test_data25]",
"lib/urlwatch/tests/test_filters.py::test_invalid_filter_name_raises_valueerror",
"lib/urlwatch/tests/test_filters.py::test_providing_subfilter_to_filter_without_subfilter_raises_valueerror",
"lib/urlwatch/tests/test_filters.py::test_providing_unknown_subfilter_raises_valueerror"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-29 15:43:24+00:00
|
bsd-3-clause
| 5,897 |
|
thp__urlwatch-785
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dbf2441..9e96ff9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,10 @@ The format mostly follows [Keep a Changelog](http://keepachangelog.com/en/1.0.0/
## UNRELEASED
+### Added
+
+- New `enabled` option for all jobs. Set to false to disable a job without needing to remove it or comment it out (Requested in #625 by snowman, contributed in #785 by jamstah)
+
### Changed
- Remove EOL'd Python 3.7 (new minimum requirement is Python 3.8), add Python 3.12 testing
@@ -15,6 +19,7 @@ The format mostly follows [Keep a Changelog](http://keepachangelog.com/en/1.0.0/
- Fix documentation for watching Github tags and releases, again (#723)
- Fix `--test-reporter` command-line option so `separate` configuration option is no longer ignored when sending test notifications (#772, by marunjar)
- Fix line height and dark mode regression (#774 reported by kongomongo, PRs #777 and #778 by trevorshannon)
+- Fix compatibility with lxml >= 5 which caused the CSS Selector filter to fail (#783 reported by jamesquilty, PR #786 by jamstah)
## [2.28] -- 2023-05-03
diff --git a/docs/source/jobs.rst b/docs/source/jobs.rst
index 8c55d58..e2c51f5 100644
--- a/docs/source/jobs.rst
+++ b/docs/source/jobs.rst
@@ -169,13 +169,14 @@ Optional keys for all job types
- ``name``: Human-readable name/label of the job
- ``filter``: :doc:`filters` (if any) to apply to the output (can be tested with ``--test-filter``)
-- ``max_tries``: Number of times to retry fetching the resource
+- ``max_tries``: After this many sequential failed runs, the error will be reported rather than ignored
- ``diff_tool``: Command to a custom tool for generating diff text
- ``diff_filter``: :doc:`filters` (if any) to apply to the diff result (can be tested with ``--test-diff-filter``)
- ``treat_new_as_changed``: Will treat jobs that don't have any historic data as ``CHANGED`` instead of ``NEW`` (and create a diff for new jobs)
- ``compared_versions``: Number of versions to compare for similarity
- ``kind`` (redundant): Either ``url``, ``shell`` or ``browser``. Automatically derived from the unique key (``url``, ``command`` or ``navigate``) of the job type
- ``user_visible_url``: Different URL to show in reports (e.g. when watched URL is a REST API URL, and you want to show a webpage)
+- ``enabled``: Can be set to false to disable an individual job (default is ``true``)
Setting keys for all jobs at once
diff --git a/lib/urlwatch/filters.py b/lib/urlwatch/filters.py
index 7b7c95b..ed21b4c 100644
--- a/lib/urlwatch/filters.py
+++ b/lib/urlwatch/filters.py
@@ -761,9 +761,9 @@ class LxmlParser:
excluded_elems = None
if self.filter_kind == 'css':
selected_elems = CSSSelector(self.expression,
- namespaces=self.namespaces).evaluate(root)
+ namespaces=self.namespaces)(root)
excluded_elems = CSSSelector(self.exclude,
- namespaces=self.namespaces).evaluate(root) if self.exclude else None
+ namespaces=self.namespaces)(root) if self.exclude else None
elif self.filter_kind == 'xpath':
selected_elems = root.xpath(self.expression, namespaces=self.namespaces)
excluded_elems = root.xpath(self.exclude, namespaces=self.namespaces) if self.exclude else None
diff --git a/lib/urlwatch/jobs.py b/lib/urlwatch/jobs.py
index f4db821..d89f41f 100644
--- a/lib/urlwatch/jobs.py
+++ b/lib/urlwatch/jobs.py
@@ -196,7 +196,7 @@ class JobBase(object, metaclass=TrackSubClasses):
class Job(JobBase):
__required__ = ()
- __optional__ = ('name', 'filter', 'max_tries', 'diff_tool', 'compared_versions', 'diff_filter', 'treat_new_as_changed', 'user_visible_url')
+ __optional__ = ('name', 'filter', 'max_tries', 'diff_tool', 'compared_versions', 'diff_filter', 'enabled', 'treat_new_as_changed', 'user_visible_url')
# determine if hyperlink "a" tag is used in HtmlReporter
def location_is_url(self):
@@ -205,6 +205,9 @@ class Job(JobBase):
def pretty_name(self):
return self.name if self.name else self.get_location()
+ def is_enabled(self):
+ return self.enabled is None or self.enabled
+
class ShellJob(Job):
"""Run a shell command and get its standard output"""
diff --git a/lib/urlwatch/worker.py b/lib/urlwatch/worker.py
index 8a7ea8c..23e710b 100644
--- a/lib/urlwatch/worker.py
+++ b/lib/urlwatch/worker.py
@@ -55,7 +55,7 @@ def run_jobs(urlwatcher):
raise ValueError(f'All job indices must be between 1 and {len(urlwatcher.jobs)}: {urlwatcher.urlwatch_config.joblist}')
cache_storage = urlwatcher.cache_storage
jobs = [job.with_defaults(urlwatcher.config_storage.config)
- for (idx, job) in enumerate(urlwatcher.jobs) if ((idx + 1) in urlwatcher.urlwatch_config.joblist or (not urlwatcher.urlwatch_config.joblist))]
+ for (idx, job) in enumerate(urlwatcher.jobs) if job.is_enabled() and ((idx + 1) in urlwatcher.urlwatch_config.joblist or (not urlwatcher.urlwatch_config.joblist))]
report = urlwatcher.report
logger.debug('Processing %d jobs (out of %d)', len(jobs), len(urlwatcher.jobs))
|
thp/urlwatch
|
e342af925930114b4194f1bdb660dec6348f653a
|
diff --git a/lib/urlwatch/tests/data/disabled-job.yaml b/lib/urlwatch/tests/data/disabled-job.yaml
new file mode 100644
index 0000000..8b550c3
--- /dev/null
+++ b/lib/urlwatch/tests/data/disabled-job.yaml
@@ -0,0 +1,6 @@
+name: "1"
+url: "|echo job 1"
+enabled: false
+---
+name: "2"
+url: "|echo job 2"
diff --git a/lib/urlwatch/tests/test_handler.py b/lib/urlwatch/tests/test_handler.py
index 7886acc..8d90cbd 100644
--- a/lib/urlwatch/tests/test_handler.py
+++ b/lib/urlwatch/tests/test_handler.py
@@ -122,6 +122,27 @@ def test_run_watcher():
cache_storage.close()
+def test_disabled_job():
+ with teardown_func():
+ urls = os.path.join(here, 'data', 'disabled-job.yaml')
+ config = os.path.join(here, 'data', 'urlwatch.yaml')
+ cache = os.path.join(here, 'data', 'cache.db')
+ hooks = ''
+
+ config_storage = YamlConfigStorage(config)
+ urls_storage = UrlsYaml(urls)
+ cache_storage = CacheMiniDBStorage(cache)
+ try:
+ urlwatch_config = ConfigForTest(config, urls, cache, hooks, True)
+
+ urlwatcher = Urlwatch(urlwatch_config, config_storage, cache_storage, urls_storage)
+ urlwatcher.run_jobs()
+
+ assert len(urlwatcher.report.job_states) == 1
+ finally:
+ cache_storage.close()
+
+
def test_unserialize_shell_job_without_kind():
job = JobBase.unserialize({
'name': 'hoho',
|
Feature request: support optional key `disabled`
- `disabled`: disable watch (default: False)
When set to `True`, do not watch the specified entry.
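A minimal sketch of how such a flag could gate a job (mirrors the `is_enabled()` check from the patch above, with the key named `enabled` as implemented):
```python
# Standalone illustration of the enabled/disabled gate; the real classes
# live in lib/urlwatch/jobs.py and worker.py.
class Job:
    def __init__(self, name, enabled=None):
        self.name = name
        self.enabled = enabled  # None means the key was absent in the YAML

    def is_enabled(self):
        return self.enabled is None or self.enabled

jobs = [Job("1", enabled=False), Job("2")]
active = [job for job in jobs if job.is_enabled()]
assert [job.name for job in active] == ["2"]
```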
|
0.0
|
e342af925930114b4194f1bdb660dec6348f653a
|
[
"lib/urlwatch/tests/test_handler.py::test_disabled_job"
] |
[
"lib/urlwatch/tests/test_handler.py::test_required_classattrs_in_subclasses",
"lib/urlwatch/tests/test_handler.py::test_save_load_jobs",
"lib/urlwatch/tests/test_handler.py::test_load_config_yaml",
"lib/urlwatch/tests/test_handler.py::test_load_urls_txt",
"lib/urlwatch/tests/test_handler.py::test_load_urls_yaml",
"lib/urlwatch/tests/test_handler.py::test_load_hooks_py",
"lib/urlwatch/tests/test_handler.py::test_run_watcher",
"lib/urlwatch/tests/test_handler.py::test_unserialize_shell_job_without_kind",
"lib/urlwatch/tests/test_handler.py::test_unserialize_with_unknown_key",
"lib/urlwatch/tests/test_handler.py::test_number_of_tries_in_cache_is_increased",
"lib/urlwatch/tests/test_handler.py::test_report_error_when_out_of_tries",
"lib/urlwatch/tests/test_handler.py::test_reset_tries_to_zero_when_successful"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-17 13:32:34+00:00
|
bsd-3-clause
| 5,898 |
|
tianocore__edk2-pytool-extensions-125
|
diff --git a/edk2toolext/environment/extdeptypes/git_dependency.py b/edk2toolext/environment/extdeptypes/git_dependency.py
index 4097716..82ad9b5 100644
--- a/edk2toolext/environment/extdeptypes/git_dependency.py
+++ b/edk2toolext/environment/extdeptypes/git_dependency.py
@@ -13,7 +13,6 @@
from edk2toolext.environment.external_dependency import ExternalDependency
from edk2toolext.environment import repo_resolver
from edk2toolext.edk2_git import Repo
-from edk2toolext.environment import version_aggregator
from edk2toolext.environment import shell_environment
@@ -78,7 +77,7 @@ def clean(self):
super().clean()
# override verify due to different scheme with git
- def verify(self, logversion=True):
+ def verify(self):
result = True
if not os.path.isdir(self._local_repo_root_path):
@@ -104,7 +103,4 @@ def verify(self, logversion=True):
result = False
self.logger.debug("Verify '%s' returning '%s'." % (self.name, result))
- if(logversion):
- version_aggregator.GetVersionAggregator().ReportVersion(self.name, self.version,
- version_aggregator.VersionTypes.INFO)
return result
|
tianocore/edk2-pytool-extensions
|
d122343ac18e896ce802ec22402ad59933f8bff0
|
diff --git a/edk2toolext/tests/test_git_dependency.py b/edk2toolext/tests/test_git_dependency.py
index af6156b..d12ce25 100644
--- a/edk2toolext/tests/test_git_dependency.py
+++ b/edk2toolext/tests/test_git_dependency.py
@@ -87,7 +87,7 @@ def test_fetch_verify_good_repo_at_top_of_tree(self):
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
- self.assertTrue(ext_dep.verify(logversion=False))
+ self.assertTrue(ext_dep.verify())
self.assertEqual(ext_dep.version, uptodate_version)
def test_fetch_verify_good_repo_at_not_top_of_tree(self):
@@ -98,7 +98,7 @@ def test_fetch_verify_good_repo_at_not_top_of_tree(self):
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
- self.assertTrue(ext_dep.verify(logversion=False))
+ self.assertTrue(ext_dep.verify())
self.assertEqual(ext_dep.version, behind_one_version)
def test_fetch_verify_non_existant_repo_commit_hash(self):
@@ -110,7 +110,7 @@ def test_fetch_verify_non_existant_repo_commit_hash(self):
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
self.assertEqual(ext_dep.version, invalid_version)
- self.assertFalse(ext_dep.verify(logversion=False), "Should not verify")
+ self.assertFalse(ext_dep.verify(), "Should not verify")
def test_verify_no_directory(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
@@ -119,7 +119,7 @@ def test_verify_no_directory(self):
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
- self.assertFalse(ext_dep.verify(logversion=False))
+ self.assertFalse(ext_dep.verify())
def test_verify_empty_repo_dir(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
@@ -129,7 +129,7 @@ def test_verify_empty_repo_dir(self):
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
os.makedirs(ext_dep._local_repo_root_path, exist_ok=True)
- self.assertFalse(ext_dep.verify(logversion=False))
+ self.assertFalse(ext_dep.verify())
def test_verify_invalid_git_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
@@ -141,7 +141,7 @@ def test_verify_invalid_git_repo(self):
os.makedirs(ext_dep._local_repo_root_path, exist_ok=True)
with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as my_file:
my_file.write("Test code\n")
- self.assertFalse(ext_dep.verify(logversion=False))
+ self.assertFalse(ext_dep.verify())
def test_verify_dirty_git_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
@@ -154,7 +154,7 @@ def test_verify_dirty_git_repo(self):
# now write a new file
with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as my_file:
my_file.write("Test code to make repo dirty\n")
- self.assertFalse(ext_dep.verify(logversion=False))
+ self.assertFalse(ext_dep.verify())
def test_verify_up_to_date(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
@@ -164,7 +164,7 @@ def test_verify_up_to_date(self):
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
- self.assertTrue(ext_dep.verify(logversion=False))
+ self.assertTrue(ext_dep.verify())
def test_verify_down_level_repo(self):
ext_dep_file_path = os.path.join(test_dir, "hw_ext_dep.json")
@@ -174,16 +174,16 @@ def test_verify_down_level_repo(self):
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
ext_dep.fetch()
- self.assertTrue(ext_dep.verify(logversion=False), "Confirm valid ext_dep at one commit behind")
+ self.assertTrue(ext_dep.verify(), "Confirm valid ext_dep at one commit behind")
with open(ext_dep_file_path, "w+") as ext_dep_file:
ext_dep_file.write(hw_json_template % uptodate_version)
ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
ext_dep = GitDependency(ext_dep_descriptor)
- self.assertFalse(ext_dep.verify(logversion=False), "Confirm downlevel repo fails to verify")
+ self.assertFalse(ext_dep.verify(), "Confirm downlevel repo fails to verify")
ext_dep.fetch()
- self.assertTrue(ext_dep.verify(logversion=False), "Confirm repo can be updated")
+ self.assertTrue(ext_dep.verify(), "Confirm repo can be updated")
# CLEAN TESTS
|
Git dependencies cause an error with the version aggregator
**Describe the bug**
When cloning a git dependency for the first time, we report the version twice: once before we clone and once after. Since the version before the clone is None, the two reported values don't match and the version aggregator throws an error (a minimal sketch of the failure mode follows the steps below).
**To Reproduce**
Steps to reproduce the behavior:
1. Clone a git ext dep in your tree via stuart_setup
2. See error
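A minimal sketch of the failure mode (an illustration only, not the actual `version_aggregator` API):
```python
# Reporting the same dependency name twice with different values (None
# before the clone, the real version after) trips the aggregator.
class VersionAggregator:
    def __init__(self):
        self.versions = {}

    def report(self, name, value):
        if name in self.versions and self.versions[name] != value:
            raise RuntimeError(
                f"{name} already reported as {self.versions[name]!r}")
        self.versions[name] = value

agg = VersionAggregator()
agg.report("my_git_dep", None)      # verify() before the clone
agg.report("my_git_dep", "abc123")  # verify() after the clone -> RuntimeError
```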
|
0.0
|
d122343ac18e896ce802ec22402ad59933f8bff0
|
[
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_fetch_verify_good_repo_at_not_top_of_tree",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_fetch_verify_non_existant_repo_commit_hash",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_down_level_repo",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_empty_repo_dir",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_invalid_git_repo",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_no_directory"
] |
[
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_clean_clean_repo",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_clean_dir_but_not_git_repo",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_clean_dirty_git_repo",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_clean_no_directory",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_fetch_verify_good_repo_at_top_of_tree",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_dirty_git_repo",
"edk2toolext/tests/test_git_dependency.py::TestGitDependency::test_verify_up_to_date",
"edk2toolext/tests/test_git_dependency.py::TestGitDependencyUrlPatching::test_url_should_be_modified_if_creds_are_indicated_and_supplied",
"edk2toolext/tests/test_git_dependency.py::TestGitDependencyUrlPatching::test_url_should_not_be_modified_without_descriptor_field",
"edk2toolext/tests/test_git_dependency.py::TestGitDependencyUrlPatching::test_url_should_not_be_modified_without_env"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-11-22 18:06:56+00:00
|
bsd-2-clause
| 5,899 |
|
tianocore__edk2-pytool-extensions-341
|
diff --git a/docs/usability/environment_variables.md b/docs/usability/environment_variables.md
index 5dab021..52611e5 100644
--- a/docs/usability/environment_variables.md
+++ b/docs/usability/environment_variables.md
@@ -28,3 +28,13 @@ calling `stuart_update` and make it far easier to switch between multiple branch
scopes where dependencies may change.
For more info, see [the External Dependencies docs](using_extdep.md).
+
+## EDK_BUILD_CMD
+
+If present, the application at this absolute path will be invoked for the edk2 build
+process instead of `build`. This is primarily used to allow a tool to wrap around `build`.
+
+## EDK_BUILD_PARAMS
+
+If present, these parameters will be passed to the build command. This is primarily used to
+pair wrapper-specific parameters with the wrapper passed in `EDK_BUILD_CMD`.
diff --git a/edk2toolext/environment/uefi_build.py b/edk2toolext/environment/uefi_build.py
index 0863b12..575bcab 100644
--- a/edk2toolext/environment/uefi_build.py
+++ b/edk2toolext/environment/uefi_build.py
@@ -310,7 +310,18 @@ def Build(self):
pre_build_env_chk = env.checkpoint()
env.set_shell_var('PYTHONHASHSEED', '0')
env.log_environment()
- ret = RunCmd("build", params)
+
+ edk2_build_cmd = self.env.GetValue("EDK_BUILD_CMD")
+ if edk2_build_cmd is None:
+ edk2_build_cmd = "build"
+ logging.debug(f"The edk2 build command is {edk2_build_cmd}")
+
+ edk2_build_params = self.env.GetValue("EDK_BUILD_PARAMS")
+ if edk2_build_params is None:
+ edk2_build_params = params
+ logging.debug(f"Edk2 build parameters are {edk2_build_params}")
+
+ ret = RunCmd(edk2_build_cmd, edk2_build_params)
# WORKAROUND - Undo the workaround.
env.restore_checkpoint(pre_build_env_chk)
|
tianocore/edk2-pytool-extensions
|
958d682f902c305cc8e6df4cbb9a139d8094b83a
|
diff --git a/edk2toolext/tests/test_uefi_build.py b/edk2toolext/tests/test_uefi_build.py
index 550bd15..19cf44d 100644
--- a/edk2toolext/tests/test_uefi_build.py
+++ b/edk2toolext/tests/test_uefi_build.py
@@ -10,9 +10,12 @@
from edk2toolext.environment import uefi_build
from edk2toolext.environment.plugintypes import uefi_helper_plugin
from edk2toolext.environment.plugin_manager import PluginManager
+from edk2toollib.utility_functions import GetHostInfo
import argparse
import tempfile
import os
+import stat
+from inspect import cleandoc
from edk2toolext.environment import shell_environment
@@ -91,6 +94,72 @@ def test_go_skip_building(self):
ret = builder.Go(self.WORKSPACE, "", helper, manager)
self.assertEqual(ret, 0)
+ def test_build_wrapper(self):
+ """Tests that a build wrapper can be used."""
+ builder = uefi_build.UefiBuilder()
+
+ # Post-build is not needed to test the build wrapper
+ builder.SkipPostBuild = True
+
+ # Some basic build variables need to be set to make it through
+ # the build preamble to the point the wrapper gets called.
+ shell_environment.GetBuildVars().SetValue("TARGET_ARCH",
+ "IA32",
+ "Set in build wrapper test")
+ shell_environment.GetBuildVars().SetValue("EDK_TOOLS_PATH",
+ self.WORKSPACE,
+ "Set in build wrapper test")
+
+ # "build_wrapper" -> The actual build_wrapper script
+ # "test_file" -> An empty file written by build_wrapper
+ build_wrapper_path = os.path.join(self.WORKSPACE, "build_wrapper")
+ test_file_path = os.path.join(self.WORKSPACE, "test_file")
+
+ # This script will write an empty file called "test_file" to the
+ # temporary directory (workspace) to demonstrate that it ran successfully
+ build_wrapper_file_content = """
+ import os
+ import sys
+
+ test_file_dir = os.path.dirname(os.path.realpath(__file__))
+ test_file_path = os.path.join(test_file_dir, "test_file")
+
+ with open(test_file_path, 'w'):
+ pass
+
+ sys.exit(0)
+ """
+
+ build_wrapper_cmd = "python"
+ build_wrapper_params = os.path.normpath(build_wrapper_path)
+
+ TestUefiBuild.write_to_file(
+ build_wrapper_path,
+ cleandoc(build_wrapper_file_content))
+
+ if GetHostInfo().os == "Linux":
+ os.chmod(build_wrapper_path,
+ os.stat(build_wrapper_path).st_mode | stat.S_IEXEC)
+
+ # This is the main point of this test. The wrapper file should be
+ # executed instead of the build command. In real scenarios, the wrapper
+ # script would subsequently call the build command.
+ shell_environment.GetBuildVars().SetValue(
+ "EDK_BUILD_CMD", build_wrapper_cmd, "Set in build wrapper test")
+ shell_environment.GetBuildVars().SetValue(
+ "EDK_BUILD_PARAMS", build_wrapper_params, "Set in build wrapper test")
+
+ manager = PluginManager()
+ helper = uefi_helper_plugin.HelperFunctions()
+ ret = builder.Go(self.WORKSPACE, "", helper, manager)
+
+ # Check the build wrapper return code
+ self.assertEqual(ret, 0)
+
+ # Check that the build wrapper ran successfully by checking that the
+ # file written by the build wrapper file exists
+ self.assertTrue(os.path.isfile(test_file_path))
+
# TODO finish unit test
|
[Feature]: Allow build wrappers
### What does the feature solve?
Stuart ultimately calls the edk2 `build` command to build code.
It would be useful to allow plugins or other Python code leveraging stuart to reuse stuart logic but substitute the actual build command with a build wrapper. Wrappers can be used to trace build commands and perform special build setup that does not need to be officially supported in the stuart build logic.
### Describe the solution
Allow the build command and build parameters used by stuart to be specified dynamically.
Build variables would be a simple solution.
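A minimal sketch of the dispatch this enables (mirrors the patch above; build variables simplified to a plain dict and `RunCmd` stubbed with `print`):
```python
# If EDK_BUILD_CMD / EDK_BUILD_PARAMS are set, they replace the plain
# 'build' invocation; otherwise stuart behaves exactly as before.
def run_build(env, default_params):
    cmd = env.get("EDK_BUILD_CMD") or "build"
    params = env.get("EDK_BUILD_PARAMS") or default_params
    print(f"would run: {cmd} {params}")  # stands in for RunCmd(cmd, params)

run_build({}, "-p Platform.dsc")
# -> would run: build -p Platform.dsc
run_build({"EDK_BUILD_CMD": "python", "EDK_BUILD_PARAMS": "wrapper.py"},
          "-p Platform.dsc")
# -> would run: python wrapper.py
```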
### Have you considered any alternatives?
Yes, PR #324.
However, most of that work can be done in a plugin so it does not bloat the main stuart logic. The minimum piece needed for the plugin to work is the change in this feature request.
### Additional context
Used to wrap the CodeQL CLI around the build command.
|
0.0
|
958d682f902c305cc8e6df4cbb9a139d8094b83a
|
[
"edk2toolext/tests/test_uefi_build.py::TestUefiBuild::test_build_wrapper"
] |
[
"edk2toolext/tests/test_uefi_build.py::TestUefiBuild::test_commandline_options",
"edk2toolext/tests/test_uefi_build.py::TestUefiBuild::test_go_skip_building",
"edk2toolext/tests/test_uefi_build.py::TestUefiBuild::test_init"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-07 15:08:46+00:00
|
bsd-2-clause
| 5,900 |
|
tianocore__edk2-pytool-extensions-368
|
diff --git a/.cspell.json b/.cspell.json
index 08d3448..28c3cdd 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -129,6 +129,7 @@
"rtags",
"Pdbs",
"uncrustify",
- "mkdocs"
+ "mkdocs",
+ "pygit"
]
}
diff --git a/edk2toolext/environment/self_describing_environment.py b/edk2toolext/environment/self_describing_environment.py
index 35ecd91..f5d3158 100644
--- a/edk2toolext/environment/self_describing_environment.py
+++ b/edk2toolext/environment/self_describing_environment.py
@@ -13,11 +13,12 @@
"""
import os
import logging
-import pathlib
+import pygit2
from edk2toolext.environment import shell_environment
from edk2toolext.environment import environment_descriptor_files as EDF
from edk2toolext.environment import external_dependency
from multiprocessing import dummy
+from pathlib import Path
import time
@@ -44,7 +45,19 @@ def __init__(self, workspace_path, scopes=(), skipped_dirs=()):
self.scopes = scopes
# Allow certain directories to be skipped
- self.skipped_dirs = tuple(map(pathlib.Path, (os.path.join(self.workspace, d) for d in skipped_dirs)))
+ self.skipped_dirs = tuple(map(Path, (os.path.join(self.workspace, d) for d in skipped_dirs)))
+
+ # Respect git worktrees
+ repo_path = pygit2.discover_repository(self.workspace)
+ if repo_path:
+ repo = pygit2.Repository(repo_path)
+ worktrees = repo.list_worktrees()
+ for worktree in worktrees:
+ worktree_path = Path(repo.lookup_worktree(worktree).path)
+ if (worktree_path.is_dir()
+ and Path(self.workspace) != worktree_path
+ and worktree_path not in skipped_dirs):
+ self.skipped_dirs += (worktree_path,)
# Validate that all scopes are unique.
if len(self.scopes) != len(set(self.scopes)):
@@ -67,8 +80,8 @@ def _gather_env_files(self, ext_strings, base_path):
dirs[:] = [d
for d
in dirs
- if pathlib.Path(root, d) not in self.skipped_dirs
- and pathlib.Path(root, d).name != '.git']
+ if Path(root, d) not in self.skipped_dirs
+ and Path(root, d).name != '.git']
# Check for any files that match the extensions we're looking for.
for file in files:
diff --git a/requirements.txt b/requirements.txt
index 9c75c19..65e5d72 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,3 +7,4 @@ coverage == 6.5.0
pyopenssl == 22.1.0
pefile == 2022.5.30
semantic_version == 2.10.0
+pygit2 == 1.11.1
diff --git a/setup.py b/setup.py
index abe879a..c250e36 100644
--- a/setup.py
+++ b/setup.py
@@ -83,7 +83,8 @@ def run(self): # noqa
'pyyaml>=5.3.1',
'edk2-pytool-library>=0.12.1',
'pefile>=2019.4.18',
- 'semantic_version>=2.10.0'
+ 'semantic_version>=2.10.0',
+ 'pygit2>=1.11.1'
],
extras_require={
'openssl': ['pyopenssl']
|
tianocore/edk2-pytool-extensions
|
9e5ecb2aa7aae5a33ca57f7e2c95eb09c379295f
|
diff --git a/edk2toolext/tests/test_self_describing_environment.py b/edk2toolext/tests/test_self_describing_environment.py
index 6900c8f..2277791 100644
--- a/edk2toolext/tests/test_self_describing_environment.py
+++ b/edk2toolext/tests/test_self_describing_environment.py
@@ -7,6 +7,7 @@
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
+import pygit2
import unittest
import tempfile
from edk2toolext.environment import self_describing_environment
@@ -114,6 +115,57 @@ def test_duplicate_id_path_env_2(self):
self_describing_environment.BootstrapEnvironment(self.workspace, scopes)
self.fail()
+ def test_git_worktree(self):
+ """Check that the SDE will recognize a git worktree.
+
+ Specifically verifies duplicate external dependencies in the git
+ worktree are ignored that are discovered during SDE initialization.
+ """
+ # The workspace should not contain a git repo yet
+ repo_path = pygit2.discover_repository(self.workspace)
+ self.assertIsNone(repo_path)
+
+ # Init a git repo in the workspace
+ pygit2.init_repository(self.workspace, initial_head='master')
+ repo_path = pygit2.discover_repository(self.workspace)
+ self.assertIsNotNone(repo_path)
+
+ repo = pygit2.Repository(self.workspace)
+
+ # Create a UEFI tree
+ repo_tree = uefi_tree(self.workspace, create_platform=True)
+ self.assertIsNotNone(repo_tree)
+
+ # Add ext deps to the tree
+ repo_tree.create_ext_dep("nuget", "NuGet.CommandLine", "5.2.0")
+ repo_tree.create_ext_dep("nuget", "NuGet.LibraryModel", "5.6.0")
+
+ # Commit the UEFI tree to the master branch
+ self.assertNotIn('master', repo.branches)
+ index = repo.index
+ index.add_all()
+ index.write()
+ author = pygit2.Signature('SDE Unit Test', '[email protected]')
+ message = "Add initial platform UEFI worktree"
+ tree = index.write_tree()
+ parents = []
+ repo.create_commit('HEAD', author, author, message, tree, parents)
+ self.assertIn('master', repo.branches)
+
+ # Create the worktree branch
+ worktree_branch = repo.branches.local.create('worktree_branch', commit=repo[repo.head.target])
+ self.assertIn('worktree_branch', repo.branches)
+
+ # Create a worktree on the worktree branch in the git repo
+ self.assertFalse(repo.list_worktrees())
+ repo.add_worktree('test_workspace', os.path.join(self.workspace, '.trees'), worktree_branch)
+ worktrees = repo.list_worktrees()
+ self.assertIn('test_workspace', worktrees)
+
+ # Because this is a subtree, the duplicate ext_deps should be ignored
+ # that are present in the worktree
+ self_describing_environment.BootstrapEnvironment(self.workspace, ('global',))
+
if __name__ == '__main__':
unittest.main()
|
[Feature]: Respect git worktrees
### What does the feature solve?
Git worktrees are a standard, well-defined git feature (see [git-worktree](https://git-scm.com/docs/git-worktree)).
The Self-Describing Environment (SDE) currently does not recognize git worktrees, which means it fails when multiple descriptors are present, since each worktree is a copy/variation of the base repo.
The SDE should recognize git worktrees and not load descriptors from the worktrees if they are not the active workspace.
### Describe the solution
Detect git worktrees. If a git worktree is not the active workspace, then do not allow the Self-Describing Environment to process it.
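A minimal sketch of the detection step (the pygit2 calls are the ones used in the patch above; the helper name is made up):
```python
import pygit2
from pathlib import Path

def worktree_skip_list(workspace):
    """Return paths of worktrees other than the active workspace."""
    repo_path = pygit2.discover_repository(workspace)
    if not repo_path:
        return ()
    repo = pygit2.Repository(repo_path)
    return tuple(
        Path(repo.lookup_worktree(name).path)
        for name in repo.list_worktrees()
        if Path(repo.lookup_worktree(name).path) != Path(workspace)
    )
```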
|
0.0
|
9e5ecb2aa7aae5a33ca57f7e2c95eb09c379295f
|
[
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_git_worktree"
] |
[
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_multiple_override_path_env",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_collect_path_env",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_override_path_env_swapped_order",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_duplicate_id_path_env",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_collect_path_env_scoped",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_override_path_env",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_unique_scopes_required",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_null_init",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_duplicate_id_path_env_2"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-14 19:34:03+00:00
|
bsd-2-clause
| 5,901 |
|
tianocore__edk2-pytool-extensions-372
|
diff --git a/docs/user/integrate/build.md b/docs/user/integrate/build.md
index ba3b908..9f25c26 100644
--- a/docs/user/integrate/build.md
+++ b/docs/user/integrate/build.md
@@ -125,10 +125,14 @@ It's easy set and get environment variables in the FDF, DSC, Settings file, and
| **Type** | **Set** | **Get** |
|---------------|--------------------------|---------------------------------|
-| Command Line | VAR=Value | N/A |
+| Command Line | VAR [= Value] | N/A |
| FDF/DSC | DEFINE VAR = Value | $(VAR) |
| Settings File | env.SetValue(Var, Value) | env.GetValue(Var, DefaultValue) |
+To support parity with Edk2's build command line option -D/--define, variables passed via the command line are not
+required to have a value associated with them. Variables defined this way are considered non-valued variable defines
+and should be checked for existence rather then value (i.e. `if env.GetValue(var):` or `if not env.GetValue(var)`).
+
While you can set and get variables anywhere in the `UefiBuilder` portion of the settings file, we provide the following
two methods to set environment variables, ensuring they are available everywhere that you are allowed to customize:
@@ -155,16 +159,16 @@ value of profile during `SetPlatformEnv()` and make our build customizations fro
def __init__(self):
self.profiles = {
- "DEV" : {"TARGET" : "DEBUG", "EDK_SHELL": "TRUE"},
- "SELFHOST" : {"TARGET" : "RELEASE", "EDK_SHELL": "TRUE"},
- "RELEASE" : {"TARGET" : "RELEASE", "EDK_SHELL": "FALSE"}
+ "DEV" : {"TARGET" : "DEBUG", "EDK_SHELL": ""},
+ "SELFHOST" : {"TARGET" : "RELEASE", "EDK_SHELL": ""},
+ "RELEASE" : {"TARGET" : "RELEASE"}
}
...
def SetPlatformEnv(self):
- profile = self.env.GetValue("PROFILE", "DEV") # Default DEV
- if profile in self.profiles:
- for key, value in profile.items():
+ build_profile = self.env.GetValue("PROFILE", "DEV") # Default DEV
+ if build_profile in self.profiles:
+ for key, value in self.profile[build_profile].items():
self.env.SetValue(key, value, "PROFILE VALUE")
...
```
@@ -173,7 +177,7 @@ The environment variables are set, whats next? The target is automatically picke
needs to be done is to add the logic of including the Edk shell or not. This can be done in the platform fdf as seen below:
``` shell
-!if $(EDK_SHELL= TRUE)
+!if $(EDK_SHELL)
FILE APPLICATION = PCD(gPcBdsPkgTokenSpaceGuid.PcdShellFile) {
SECTION PE32 = <SomePath>/Shell.efi
SECTION UI = "EdkShell"
diff --git a/docs/user/using/build.md b/docs/user/using/build.md
index 651f861..6fafe79 100644
--- a/docs/user/using/build.md
+++ b/docs/user/using/build.md
@@ -68,14 +68,18 @@ stuart_build -c path/to/SettingsFile.py
Yes! Build values can be set and passed to the build command via the command
line or from within your platform build file
[Read More](/integrate/build#setting-getting-environment-variables).
-You define a build value via `BLD_*_<Value>` for all builds,
-`BLD_DEBUG_<Value>` for debug builds, and `BLD_RELEASE_<Value>` for release
+You define a build value via `BLD_*_<VAR>=<VALUE>` for all builds,
+`BLD_DEBUG_<VAR>=<VALUE>` for debug builds, and `BLD_RELEASE_<VAR>=<VALUE>` for release
builds.
+Non-valued build defines are also supported! Simply follow the above nomenclature
+without providing a value, e.g. `BLD_*_<VAR>`
+
From the command line:
```cmd
-\> stuart_build -c Platform/QemuQ35Pkg/PlatformBuild.py BLD_*_SHIP_MODE=FALSE
+\> stuart_build -c Platforms/QemuQ35Pkg/PlatformBuild.py BLD_*_SHIP_MODE=FALSE
+\> stuart_build -c Platforms/QemuQ35Pkg/PlatformBuild.py BLD_*_E1000_ENABLE
```
From within the Platform build file:
@@ -84,5 +88,6 @@ From within the Platform build file:
def SetPlatformEnv(self):
...
self.env.SetValue("BLD_*_SHIP_MODE", "FALSE", "Default")
+ self.env.SetValue("BLD_*_E1000", None, "Default")
...
```
diff --git a/edk2toolext/edk2_invocable.py b/edk2toolext/edk2_invocable.py
index c41fbe3..3b36927 100644
--- a/edk2toolext/edk2_invocable.py
+++ b/edk2toolext/edk2_invocable.py
@@ -22,6 +22,8 @@
import inspect
import pkg_resources
import argparse
+from random import choice
+from string import ascii_letters
from typing import Iterable, Tuple
from textwrap import dedent
from edk2toolext.environment import shell_environment
@@ -331,10 +333,13 @@ def ParseCommandLineOptions(self):
epilog = dedent('''\
positional arguments:
<key>=<value> - Set an env variable for the pre/post build process
+ <key> - Set a non-valued env variable for the pre/post build process
BLD_*_<key>=<value> - Set a build flag for all build types
(key=value will get passed to build process)
+ BLD_*_<key> - Set a non-valued build flag for all build types
BLD_<TARGET>_<key>=<value> - Set a build flag for build type of <target>
(key=value will get passed to build process for given build type)
+ BLD_<TARGET>_<key> - Set a non-valued build flag for a build type of <target>
''')
parserObj = argparse.ArgumentParser(epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter,)
@@ -414,18 +419,29 @@ def ParseCommandLineOptions(self):
self.PlatformSettings.RetrieveCommandLineOptions(args)
#
- # Look through unknown_args and BuildConfig for strings that are x=y,
- # set env.SetValue(x, y),
- # then remove this item from the list.
+ # Look through unknown_args and BuildConfig for strings that are:
+ # 1. x=y, -> set env.SetValue(x, y),
+ # 2. x, -> set env.SetValue(x, random_string)
+ # then remove these items from the list.
#
+ # Non valued build variables (#2) set the value to a random string
+ # as the expectation is that any developer using this functionality
+ # check for the existence of the build variable rather than the value
+ # of the variable. This is to have parity between edk2's build -D
+ # flag and stuart.
env = shell_environment.GetBuildVars()
BuildConfig = os.path.abspath(args.build_config)
for argument in unknown_args:
- if argument.count("=") != 1:
+ if argument.count("=") == 1:
+ tokens = argument.strip().split("=")
+ env.SetValue(tokens[0].strip().upper(), tokens[1].strip(), "From CmdLine")
+ elif argument.count("=") == 0:
+ env.SetValue(argument.strip().upper(),
+ ''.join(choice(ascii_letters) for _ in range(20)),
+ "Non valued variable set From cmdLine")
+ else:
raise RuntimeError(f"Unknown variable passed in via CLI: {argument}")
- tokens = argument.strip().split("=")
- env.SetValue(tokens[0].strip().upper(), tokens[1].strip(), "From CmdLine")
unknown_args.clear() # remove the arguments we've already consumed
@@ -438,7 +454,12 @@ def ParseCommandLineOptions(self):
unknown_args.append(stripped_line)
for argument in unknown_args:
- if argument.count("=") != 1:
+ if argument.count("=") == 1:
+ tokens = argument.strip().split("=")
+ env.SetValue(tokens[0].strip().upper(), tokens[1].strip(), "From BuildConf")
+ elif argument.count("=") == 0:
+ env.SetValue(argument.strip().upper(),
+ ''.join(choice(ascii_letters) for _ in range(20)),
+ "Non valued variable set from BuildConfig")
+ else:
raise RuntimeError(f"Unknown variable passed in via BuildConfig: {argument}")
- tokens = argument.strip().split("=")
- env.SetValue(tokens[0].strip().upper(), tokens[1].strip(), "From BuildConf")
diff --git a/edk2toolext/environment/var_dict.py b/edk2toolext/environment/var_dict.py
index 73f9033..dc3befc 100644
--- a/edk2toolext/environment/var_dict.py
+++ b/edk2toolext/environment/var_dict.py
@@ -15,6 +15,8 @@
"""
import logging
+from random import choice
+from string import ascii_letters
class EnvEntry(object):
@@ -140,7 +142,8 @@ def SetValue(self, k, v, comment, overridable=False):
Args:
k (str): The key to store the value under
- v (varied): The value to store
+ v (varied | None): The value to store as a string, or None to store
+ a non valued build variable
comment (str): A comment to show where / how the variable was stored.
Useful for debugging
overridable (bool): Specifies if the variable is allowed to be override
@@ -151,7 +154,10 @@ def SetValue(self, k, v, comment, overridable=False):
"""
key = k.upper()
en = self.GetEntry(key)
- value = str(v)
+ if not v:
+ value = ''.join(choice(ascii_letters) for _ in range(20))
+ else:
+ value = str(v)
self.Logger.debug("Trying to set key %s to value %s" % (k, v))
if (en is None):
# new entry
|
tianocore/edk2-pytool-extensions
|
cf18f2eb12baf698fb579330786caf1deb821c74
|
diff --git a/edk2toolext/tests/test_edk2_setup.py b/edk2toolext/tests/test_edk2_setup.py
index 9bb3caa..2c91fe3 100644
--- a/edk2toolext/tests/test_edk2_setup.py
+++ b/edk2toolext/tests/test_edk2_setup.py
@@ -79,3 +79,85 @@ def test_setup_bad_omnicache_path(self):
except SystemExit as e:
self.assertEqual(e.code, 0, "We should have a non zero error code")
pass
+
+ def test_parse_command_line_options_pass(self):
+ builder = Edk2PlatformSetup()
+ settings_file = os.path.join(self.minimalTree, "settings.py")
+ sys.argv = ["stuart_setup",
+ "-c", settings_file,
+ "BLD_*_VAR",
+ "VAR",
+ "BLD_DEBUG_VAR2",
+ "BLD_RELEASE_VAR2",
+ "TEST_VAR=TEST",
+ "BLD_*_TEST_VAR2=TEST"]
+
+ try:
+ builder.Invoke()
+ except SystemExit as e:
+ self.assertEqual(e.code, 0)
+
+ env = shell_environment.GetBuildVars()
+ self.assertIsNotNone(env.GetValue("BLD_*_VAR"))
+ self.assertIsNotNone(env.GetValue("VAR"))
+ self.assertIsNotNone(env.GetValue("BLD_DEBUG_VAR2"))
+ self.assertIsNotNone(env.GetValue("BLD_RELEASE_VAR2"))
+ self.assertEqual(env.GetValue("TEST_VAR"), "TEST")
+ self.assertEqual(env.GetValue("BLD_*_TEST_VAR2"), "TEST")
+
+ def test_parse_command_line_options_fail(self):
+
+ for arg in ["BLD_*_VAR=5=10", "BLD_DEBUG_VAR2=5=5", "BLD_RELEASE_VAR3=5=5", "VAR=10=10"]:
+ builder = Edk2PlatformSetup()
+ settings_file = os.path.join(self.minimalTree, "settings.py")
+ sys.argv = ["stuart_setup",
+ "-c", settings_file,
+ arg]
+ try:
+ builder.Invoke()
+ except RuntimeError as e:
+ self.assertTrue(str(e).startswith(f"Unknown variable passed in via CLI: {arg}"))
+
+ def test_conf_file_pass(self):
+ builder = Edk2PlatformSetup()
+ settings_file = os.path.join(self.minimalTree, "settings.py")
+ with open(os.path.join(self.minimalTree, 'BuildConfig.conf'), 'x') as f:
+ f.writelines([
+ "BLD_*_VAR",
+ "\nVAR",
+ "\nBLD_DEBUG_VAR2",
+ "\nBLD_RELEASE_VAR2",
+ "\nTEST_VAR=TEST",
+ "\nBLD_*_TEST_VAR2=TEST"
+ ])
+
+ sys.argv = ["stuart_setup", "-c", settings_file]
+
+ try:
+ builder.Invoke()
+ except SystemExit as e:
+ self.assertEqual(e.code, 0)
+
+ env = shell_environment.GetBuildVars()
+ self.assertIsNotNone(env.GetValue("BLD_*_VAR"))
+ self.assertIsNotNone(env.GetValue("VAR"))
+ self.assertIsNotNone(env.GetValue("BLD_DEBUG_VAR2"))
+ self.assertIsNotNone(env.GetValue("BLD_RELEASE_VAR2"))
+ self.assertEqual(env.GetValue("TEST_VAR"), "TEST")
+ self.assertEqual(env.GetValue("BLD_*_TEST_VAR2"), "TEST")
+
+ def test_conf_file_fail(self):
+ builder = Edk2PlatformSetup()
+ settings_file = os.path.join(self.minimalTree, "settings.py")
+ arg = "BLD_*_VAR=5=5"
+ with open(os.path.join(self.minimalTree, 'BuildConfig.conf'), 'x') as f:
+ f.writelines([
+ arg,
+ ])
+
+ sys.argv = ["stuart_setup", "-c", settings_file]
+
+ try:
+ builder.Invoke()
+ except RuntimeError as e:
+ self.assertTrue(str(e).startswith(f"Unknown variable passed in via BuildConfig: {arg}"))
|
Support non-valued BLD defines, e.g. "-D E1000_ENABLE" without =<foo>
The EDK2 [OvmfPkg README](https://github.com/tianocore/edk2/blob/master/OvmfPkg/README) instructs users to add non-valued "-D" defines to enable features.
OvmfPkg/README:
> - Add "-D E1000_ENABLE" to your build command (only when building
> "OvmfPkg/OvmfPkgIa32X64.dsc" or "OvmfPkg/OvmfPkgX64.dsc").
> - For example: "build -D E1000_ENABLE".
stuart_build throws an exception if I do not pass a `=<value>` in a BLD_ parameter, for example:
```
stuart_build -c PlatformBuild.py BLD_*_E1000_ENABLE
Traceback (most recent call last):
File "C:\Program Files\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Program Files\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\git\venv_pytools\Scripts\stuart_build.exe\__main__.py", line 7, in <module>
File "c:\git\venv_pytools\lib\site-packages\edk2toolext\invocables\edk2_platform_build.py", line 141, in main
Edk2PlatformBuild().Invoke()
File "c:\git\venv_pytools\lib\site-packages\edk2toolext\base_abstract_invocable.py", line 107, in Invoke
self.ParseCommandLineOptions()
File "c:\git\venv_pytools\lib\site-packages\edk2toolext\edk2_invocable.py", line 203, in ParseCommandLineOptions
raise RuntimeError(f"Unknown variable passed in via CLI: {argument}")
RuntimeError: Unknown variable passed in via CLI: BLD_*_E1000_ENABLE
```
A workaround is to specify a dummy value **`=1`**, for example:
`stuart_build -c PlatformBuild.py BLD_*_E1000_ENABLE=1`
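For reference, a minimal sketch of argument handling that would accept such non-valued defines (the function name `parse_build_var` and the `"1"` default are illustrative assumptions, not the actual edk2_invocable code):
```
# Illustrative sketch only -- not the edk2_invocable implementation.
# Accepts bare defines ("BLD_*_E1000_ENABLE") and name=value pairs
# ("TEST_VAR=TEST"), and rejects tokens with multiple '=' signs
# ("BLD_*_VAR=5=10"), matching the behavior the tests above check.

def parse_build_var(token, source="CLI"):
    parts = token.split("=")
    if len(parts) == 1:
        # Non-valued define; the "1" default is an assumption for illustration
        return parts[0], "1"
    if len(parts) == 2:
        return parts[0], parts[1]
    raise RuntimeError(f"Unknown variable passed in via {source}: {token}")

# parse_build_var("BLD_*_E1000_ENABLE")  -> ("BLD_*_E1000_ENABLE", "1")
# parse_build_var("TEST_VAR=TEST")       -> ("TEST_VAR", "TEST")
# parse_build_var("BLD_*_VAR=5=10")      -> RuntimeError
```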
|
0.0
|
cf18f2eb12baf698fb579330786caf1deb821c74
|
[
"edk2toolext/tests/test_edk2_setup.py::TestEdk2Setup::test_conf_file_pass",
"edk2toolext/tests/test_edk2_setup.py::TestEdk2Setup::test_parse_command_line_options_pass"
] |
[
"edk2toolext/tests/test_edk2_setup.py::TestEdk2Setup::test_ci_setup",
"edk2toolext/tests/test_edk2_setup.py::TestEdk2Setup::test_conf_file_fail",
"edk2toolext/tests/test_edk2_setup.py::TestEdk2Setup::test_init",
"edk2toolext/tests/test_edk2_setup.py::TestEdk2Setup::test_parse_command_line_options_fail",
"edk2toolext/tests/test_edk2_setup.py::TestEdk2Setup::test_setup_bad_omnicache_path"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-15 22:21:37+00:00
|
bsd-2-clause
| 5,902 |
|
tianocore__edk2-pytool-extensions-41
|
diff --git a/edk2toolext/environment/extdeptypes/web_dependency.py b/edk2toolext/environment/extdeptypes/web_dependency.py
index fe6d7d7..b3af6cb 100644
--- a/edk2toolext/environment/extdeptypes/web_dependency.py
+++ b/edk2toolext/environment/extdeptypes/web_dependency.py
@@ -142,6 +142,8 @@ def fetch(self):
# If we just downloaded a file, we need to create a directory named self.contents_dir,
# copy the file inside, and name it self.internal_path
else:
+ os.makedirs(self.contents_dir, exist_ok=True)
+ complete_internal_path = os.path.join(self.contents_dir, self.internal_path)
logging.info(f"Copying file to {complete_internal_path}")
shutil.move(temp_file_path, complete_internal_path)
|
tianocore/edk2-pytool-extensions
|
6df5a46d4827645afc87967ad9216cf698e70648
|
diff --git a/edk2toolext/tests/test_web_dependency.py b/edk2toolext/tests/test_web_dependency.py
index 3798161..1692837 100644
--- a/edk2toolext/tests/test_web_dependency.py
+++ b/edk2toolext/tests/test_web_dependency.py
@@ -1,4 +1,4 @@
-## @file test_web_dependency.py
+# @file test_web_dependency.py
# Unit test suite for the WebDependency class.
#
##
@@ -14,6 +14,7 @@
import tarfile
import zipfile
import tempfile
+import json
import urllib.request
from edk2toolext.environment import environment_descriptor_files as EDF
from edk2toolext.environment.extdeptypes.web_dependency import WebDependency
@@ -32,6 +33,17 @@
"sha256":"68f2335344c3f7689f8d69125d182404a3515b8daa53a9c330f115739889f998"
}
'''
+# JSON file that describes a single file to download from the internet
+# bing.com was chosen as it's probably not going anywhere soon and it's a small file to download
+single_file_extdep = {
+ "scope": "global",
+ "type": "web",
+ "name": "test",
+ "source": "https://www.bing.com/",
+ "version": "20190805",
+ "flags": [],
+ "internal_path": "test.txt"
+}
def prep_workspace():
@@ -81,6 +93,21 @@ def test_fail_with_bad_url(self):
ext_dep.fetch()
self.fail("should have thrown an Exception")
+ # try to download a single file from the internet
+ def test_single_file(self):
+ ext_dep_file_path = os.path.join(test_dir, "good_ext_dep.json")
+ with open(ext_dep_file_path, "w+") as ext_dep_file:
+ ext_dep_file.write(json.dumps(single_file_extdep)) # dump to a file
+
+ ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents
+ ext_dep = WebDependency(ext_dep_descriptor)
+ ext_dep.fetch()
+
+ ext_dep_name = single_file_extdep['name'] + "_extdep"
+ file_path = os.path.join(test_dir, ext_dep_name, single_file_extdep['internal_path'])
+ if not os.path.isfile(file_path):
+ self.fail("The downloaded file isn't there")
+
# Test that get_internal_path_root works the way we expect with a flat directory structure.
# test_dir\inner_dir - test_dir\inner_dir should be the root.
def test_get_internal_path_root_flat(self):
@@ -96,8 +123,8 @@ def test_get_internal_path_root_with_subfolders(self):
first_level_dir_name = "first_dir"
second_level_dir_name = "second_dir"
inner_dir_path = os.path.join(outer_dir, first_level_dir_name)
- self.assertEqual(WebDependency.get_internal_path_root(outer_dir,
- os.path.join(first_level_dir_name, second_level_dir_name)), inner_dir_path)
+ inner_second_dir_path = os.path.join(first_level_dir_name, second_level_dir_name)
+ self.assertEqual(WebDependency.get_internal_path_root(outer_dir, inner_second_dir_path), inner_dir_path)
# Test that a single file zipped is able to be processed by unpack.
def test_unpack_zip_file(self):
|
web dependency doesn't work on single files
When downloading a single file (like a binary from GitHub), fetch fails because the ext_dep folder is never created before the downloaded file is moved into it.
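For reference, a minimal sketch of the fix shown in the patch above: create the extdep contents directory before moving the downloaded file into it (the function and parameter names are illustrative):
```
import os
import shutil

def place_single_file(temp_file_path, contents_dir, internal_path):
    """Move a downloaded file into the extdep folder, creating the
    folder first -- the step that was missing before the fix."""
    os.makedirs(contents_dir, exist_ok=True)
    complete_internal_path = os.path.join(contents_dir, internal_path)
    shutil.move(temp_file_path, complete_internal_path)
    return complete_internal_path
```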
|
0.0
|
6df5a46d4827645afc87967ad9216cf698e70648
|
[
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_single_file"
] |
[
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_fail_with_bad_url",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_get_internal_path_root_flat",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_get_internal_path_root_with_subfolders",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_multi_level_directory",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_tar_uses_linux_path_sep",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_unpack_tar_directory",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_unpack_tar_file",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_unpack_zip_directory",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_unpack_zip_file",
"edk2toolext/tests/test_web_dependency.py::TestWebDependency::test_zip_uses_linux_path_sep"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-08-30 18:16:01+00:00
|
bsd-2-clause
| 5,903 |
|
tianocore__edk2-pytool-extensions-419
|
diff --git a/edk2toolext/environment/extdeptypes/git_dependency.py b/edk2toolext/environment/extdeptypes/git_dependency.py
index 3b67974..be3e94d 100644
--- a/edk2toolext/environment/extdeptypes/git_dependency.py
+++ b/edk2toolext/environment/extdeptypes/git_dependency.py
@@ -83,7 +83,17 @@ def clean(self):
super().clean()
def verify(self):
- """Verifies the clone was successful."""
+ """Verifies the clone was successful.
+
+ !!! Note
+ If verify is set to false in the dependencies state file,
+ it will always skip the verification process.
+ """
+ state_data = self.get_state_file_data()
+ if state_data and state_data['verify'] is False:
+ logging.warn(f'{self.name} is unverified. Unexpected results may occur.')
+ return True
+
result = True
if not os.path.isdir(self._local_repo_root_path):
diff --git a/edk2toolext/environment/external_dependency.py b/edk2toolext/environment/external_dependency.py
index ef2aec3..0a38bd9 100644
--- a/edk2toolext/environment/external_dependency.py
+++ b/edk2toolext/environment/external_dependency.py
@@ -65,7 +65,7 @@ def __init__(self, descriptor):
self.contents_dir = os.path.join(
self.descriptor_location, self.name + "_extdep")
self.state_file_path = os.path.join(
- self.contents_dir, "extdep_state.json")
+ self.contents_dir, "extdep_state.yaml")
self.published_path = self.compute_published_path()
def set_global_cache_path(self, global_cache_path):
@@ -177,19 +177,8 @@ def copy_to_global_cache(self, source_path: str):
def verify(self):
"""Verifies the dependency was successfully downloaded."""
result = True
- state_data = None
+ state_data = self.get_state_file_data()
- # See whether or not the state file exists.
- if not os.path.isfile(self.state_file_path):
- result = False
-
- # Attempt to load the state file.
- if result:
- with open(self.state_file_path, 'r') as file:
- try:
- state_data = yaml.safe_load(file)
- except Exception:
- pass
if state_data is None:
result = False
@@ -202,15 +191,27 @@ def verify(self):
def report_version(self):
"""Reports the version of the external dependency."""
+ state_data = self.get_state_file_data()
+ version = self.version
+ if state_data and state_data['verify'] is False:
+ version = "UNVERIFIED"
version_aggregator.GetVersionAggregator().ReportVersion(self.name,
- self.version,
+ version,
version_aggregator.VersionTypes.INFO,
self.descriptor_location)
def update_state_file(self):
"""Updates the file representing the state of the dependency."""
with open(self.state_file_path, 'w+') as file:
- yaml.dump({'version': self.version}, file)
+ yaml.dump({'version': self.version, 'verify': True}, file)
+
+ def get_state_file_data(self):
+        """Loads and returns the state file data, or None if it cannot be read."""
+ try:
+ with open(self.state_file_path, 'r') as file:
+ return yaml.safe_load(file)
+ except Exception:
+ return None
def ExtDepFactory(descriptor):
diff --git a/edk2toolext/invocables/edk2_platform_build.py b/edk2toolext/invocables/edk2_platform_build.py
index 6bec14d..b24f2cd 100644
--- a/edk2toolext/invocables/edk2_platform_build.py
+++ b/edk2toolext/invocables/edk2_platform_build.py
@@ -68,12 +68,29 @@ def AddCommandLineOptions(self, parserObj):
except (TypeError):
raise RuntimeError(f"UefiBuild not found in module:\n{dir(self.PlatformModule)}")
+ parserObj.add_argument('-nv', '-NV', '--noverify', '--NOVERIFY', '--NoVerify',
+ dest="verify", default=True, action='store_false',
+ help='Skip verifying external dependencies before build.')
self.PlatformBuilder.AddPlatformCommandLineOptions(parserObj)
def RetrieveCommandLineOptions(self, args):
"""Retrieve command line options from the argparser."""
+ self.verify = args.verify
self.PlatformBuilder.RetrievePlatformCommandLineOptions(args)
+ def GetVerifyCheckRequired(self) -> bool:
+ """Will call self_describing_environment.VerifyEnvironment if this returns True.
+
+ !!! hint
+ Optional override in a subclass
+
+ Returns:
+ (bool): whether verify check is required or not
+ """
+ if not self.verify:
+ logging.warning("Skipping Environment Verification. Unexpected results may occur.")
+ return self.verify
+
def GetSettingsClass(self):
"""Returns the BuildSettingsManager class.
|
tianocore/edk2-pytool-extensions
|
c5218ea5b28f107a5a198b5e1e7a983be93bcd4b
|
diff --git a/edk2toolext/tests/test_edk2_update.py b/edk2toolext/tests/test_edk2_update.py
index c712e86..25ee5b5 100644
--- a/edk2toolext/tests/test_edk2_update.py
+++ b/edk2toolext/tests/test_edk2_update.py
@@ -84,7 +84,7 @@ def test_one_level_recursive(self):
updater = self.invoke_update(tree.get_settings_provider_path())
# make sure it worked
self.assertTrue(os.path.exists(os.path.join(WORKSPACE, "Edk2TestUpdate_extdep",
- "NuGet.CommandLine_extdep", "extdep_state.json")))
+ "NuGet.CommandLine_extdep", "extdep_state.yaml")))
build_env, shell_env, failure = updater.PerformUpdate()
# we should have no failures
self.assertEqual(failure, 0)
diff --git a/edk2toolext/tests/test_self_describing_environment.py b/edk2toolext/tests/test_self_describing_environment.py
index 2277791..3fcf43d 100644
--- a/edk2toolext/tests/test_self_describing_environment.py
+++ b/edk2toolext/tests/test_self_describing_environment.py
@@ -10,6 +10,7 @@
import pygit2
import unittest
import tempfile
+import yaml
from edk2toolext.environment import self_describing_environment
from edk2toolext.tests.uefi_tree import uefi_tree
from edk2toolext.environment import version_aggregator
@@ -166,6 +167,33 @@ def test_git_worktree(self):
# that are present in the worktree
self_describing_environment.BootstrapEnvironment(self.workspace, ('global',))
+ def test_no_verify_extdep(self):
+ tree = uefi_tree(self.workspace, create_platform=False)
+ tree.create_ext_dep(dep_type="git",
+ scope="global",
+ name="HelloWorld",
+ source="https://github.com/octocat/Hello-World.git",
+ version="7fd1a60b01f91b314f59955a4e4d4e80d8edf11d")
+
+ # Bootstrap the environment
+ self_describing_environment.BootstrapEnvironment(self.workspace, ("global",))
+ self_describing_environment.UpdateDependencies(self.workspace, scopes=("global",))
+ self_describing_environment.VerifyEnvironment(self.workspace, scopes=("global",))
+
+ # Delete the readme to make the repo dirty then verify it fails
+ readme = os.path.join(tree.get_workspace(), "HelloWorld_extdep", "HelloWorld", "README")
+ os.remove(readme)
+ self.assertFalse(self_describing_environment.VerifyEnvironment(self.workspace, scopes=("global",)))
+
+ # Update the state file to not verify the specific external dependency then verify it passes
+ state_file = os.path.join(tree.get_workspace(), "HelloWorld_extdep", "extdep_state.yaml")
+ with open(state_file, 'r+') as f:
+ content = yaml.safe_load(f)
+ f.seek(0)
+ content["verify"] = False
+ yaml.safe_dump(content, f)
+ self.assertTrue(self_describing_environment.VerifyEnvironment(self.workspace, scopes=("global",)))
+
if __name__ == '__main__':
unittest.main()
|
[Feature]: Allow extdep to use local resource for co-development
### What does the feature solve?
When using an extdep for a co-developed component, such as a git or nuget extdep, you can currently override its location using a build environment variable, but this does not work for build plugins or other self-describing environment components.
### Describe the solution
Add an option similar to "pip install -e" that allows an extdep to symlink to a local copy of the resource to support co-development. This should be saved into the state file so that the pre-build check also knows to ignore the version or dirty-state checks. This could be done by running a standalone command or adding flag options to stuart_update.
### Have you considered any alternatives?
_No response_
### Additional context
This is part of an effort by Project MU to create more stand-alone feature repos that can be consumed not as submodules but as complete packages, either through nuget or a git extdep. This is used for the following repos.
https://github.com/microsoft/mu_feature_ipmi
https://github.com/microsoft/mu_feature_mm_supv
https://github.com/microsoft/mu_feature_config
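For reference, given that the patch above stores a `verify` key alongside `version` in each extdep's `extdep_state.yaml`, a minimal sketch of opting one dependency out of verification (the file path is an illustrative assumption):
```
import yaml

state_file = "HelloWorld_extdep/extdep_state.yaml"  # illustrative path

with open(state_file, "r") as f:
    state = yaml.safe_load(f)  # e.g. {"version": "...", "verify": True}

state["verify"] = False  # this extdep will now be skipped during verify

with open(state_file, "w") as f:
    yaml.safe_dump(state, f)
```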
|
0.0
|
c5218ea5b28f107a5a198b5e1e7a983be93bcd4b
|
[
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_no_verify_extdep"
] |
[
"edk2toolext/tests/test_edk2_update.py::TestEdk2Update::test_bad_ext_dep",
"edk2toolext/tests/test_edk2_update.py::TestEdk2Update::test_duplicate_ext_deps",
"edk2toolext/tests/test_edk2_update.py::TestEdk2Update::test_init",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_collect_path_env",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_collect_path_env_scoped",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_duplicate_id_path_env",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_duplicate_id_path_env_2",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_git_worktree",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_multiple_override_path_env",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_null_init",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_override_path_env",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_override_path_env_swapped_order",
"edk2toolext/tests/test_self_describing_environment.py::Testself_describing_environment::test_unique_scopes_required"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-27 17:22:45+00:00
|
bsd-2-clause
| 5,904 |
|
tianocore__edk2-pytool-library-114
|
diff --git a/edk2toollib/uefi/edk2/parsers/base_parser.py b/edk2toollib/uefi/edk2/parsers/base_parser.py
index d2179d1..2974082 100644
--- a/edk2toollib/uefi/edk2/parsers/base_parser.py
+++ b/edk2toollib/uefi/edk2/parsers/base_parser.py
@@ -7,6 +7,7 @@
##
import os
import logging
+from edk2toollib.uefi.edk2 import path_utilities
class BaseParser(object):
@@ -24,8 +25,8 @@ def __init__(self, log="BaseParser"):
self.ConditionalStack = []
self.RootPath = ""
self.PPs = []
- self.TargetFile = None
- self.TargetFilePath = None
+ self._Edk2PathUtil = None
+ self.TargetFilePath = None # the abs path of the target file
self.CurrentLine = -1
self._MacroNotDefinedValue = "0" # value to used for undefined macro
@@ -42,19 +43,27 @@ def SetBaseAbsPath(self, path):
Returns:
"""
- self.RootPath = path
+ self.RootPath = os.path.abspath(path)
+ self._ConfigEdk2PathUtil()
return self
+ def _ConfigEdk2PathUtil(self):
+ ''' creates the path utility object based on the root path and package paths '''
+ self._Edk2PathUtil = path_utilities.Edk2Path(self.RootPath, self.PPs, error_on_invalid_pp=False)
+
def SetPackagePaths(self, pps=[]):
"""
Args:
pps: (Default value = [])
+ This must be called after SetBaseAbsPath
+
Returns:
"""
self.PPs = pps
+ self._ConfigEdk2PathUtil()
return self
def SetInputVars(self, inputdict):
@@ -71,37 +80,43 @@ def SetInputVars(self, inputdict):
def FindPath(self, *p):
"""
-
+        Given a path, find it relative to the root, the current target file, or the package paths
Args:
- *p:
+ *p: any number of strings or path like objects
- Returns:
+ Returns: a full absolute path if the file exists, None on failure
"""
- # NOTE: Some of this logic should be replaced
- # with the path resolution from Edk2Module code.
+ # check if we're getting a None
+ if p is None or (len(p) == 1 and p[0] is None):
+ return None
+
+ Path = os.path.join(*p)
+        # check if it is absolute
+ if os.path.isabs(Path) and os.path.exists(Path):
+ return Path
# If the absolute path exists, return it.
Path = os.path.join(self.RootPath, *p)
if os.path.exists(Path):
- return Path
+ return os.path.abspath(Path)
# If that fails, check a path relative to the target file.
if self.TargetFilePath is not None:
- Path = os.path.join(self.TargetFilePath, *p)
+ Path = os.path.abspath(os.path.join(os.path.dirname(self.TargetFilePath), *p))
if os.path.exists(Path):
- return Path
+ return os.path.abspath(Path)
# If that fails, check in every possible Pkg path.
- for Pkg in self.PPs:
- Path = os.path.join(self.RootPath, Pkg, *p)
- if os.path.exists(Path):
+ if self._Edk2PathUtil is not None:
+ target_path = os.path.join(*p)
+ Path = self._Edk2PathUtil.GetAbsolutePathOnThisSytemFromEdk2RelativePath(target_path, False)
+ if Path is not None:
return Path
# log invalid file path
- Path = os.path.join(self.RootPath, *p)
- self.Logger.error("Invalid file path %s" % Path)
- return Path
+ self.Logger.error(f"Invalid file path: {p}")
+ return None
def WriteLinesToFile(self, filepath):
"""
diff --git a/edk2toollib/uefi/edk2/parsers/dsc_parser.py b/edk2toollib/uefi/edk2/parsers/dsc_parser.py
index bc73ba4..d783100 100644
--- a/edk2toollib/uefi/edk2/parsers/dsc_parser.py
+++ b/edk2toollib/uefi/edk2/parsers/dsc_parser.py
@@ -47,9 +47,12 @@ def __ParseLine(self, Line, file_name=None, lineno=None):
if(line_resolved.strip().lower().startswith("!include")):
# include line.
tokens = line_resolved.split()
- self.Logger.debug("Opening Include File %s" % os.path.join(self.RootPath, tokens[1]))
- sp = self.FindPath(tokens[1])
- self._dsc_file_paths.add(sp)
+ include_file = tokens[1]
+ sp = self.FindPath(include_file)
+ if sp is None:
+ raise FileNotFoundError(include_file)
+ self.Logger.debug("Opening Include File %s" % sp)
+ self._PushTargetFile(sp)
lf = open(sp, "r")
loc = lf.readlines()
lf.close()
@@ -182,8 +185,12 @@ def __ParseDefineLine(self, Line):
if(line_resolved.strip().lower().startswith("!include")):
# include line.
tokens = line_resolved.split()
- self.Logger.debug("Opening Include File %s" % os.path.join(self.RootPath, tokens[1]))
- sp = self.FindPath(tokens[1])
+ include_file = tokens[1]
+ self.Logger.debug("Opening Include File %s" % include_file)
+ sp = self.FindPath(include_file)
+ if sp is None:
+ raise FileNotFoundError(include_file)
+ self._PushTargetFile(sp)
lf = open(sp, "r")
loc = lf.readlines()
lf.close()
@@ -297,20 +304,25 @@ def ResetParserState(self):
def ParseFile(self, filepath):
self.Logger.debug("Parsing file: %s" % filepath)
- self.TargetFile = os.path.abspath(filepath)
- self.TargetFilePath = os.path.dirname(self.TargetFile)
- sp = os.path.join(filepath)
- self._dsc_file_paths.add(sp)
+ sp = self.FindPath(filepath)
+ if sp is None:
+ raise FileNotFoundError(filepath)
+ self._PushTargetFile(sp)
f = open(sp, "r")
# expand all the lines and include other files
file_lines = f.readlines()
self.__ProcessDefines(file_lines)
# reset the parser state before processing more
self.ResetParserState()
+ self._PushTargetFile(sp)
self.__ProcessMore(file_lines, file_name=sp)
f.close()
self.Parsed = True
+ def _PushTargetFile(self, targetFile):
+ self.TargetFilePath = os.path.abspath(targetFile)
+ self._dsc_file_paths.add(self.TargetFilePath)
+
def GetMods(self):
return self.ThreeMods + self.SixMods
diff --git a/edk2toollib/uefi/edk2/path_utilities.py b/edk2toollib/uefi/edk2/path_utilities.py
index a4ebdce..ea76674 100644
--- a/edk2toollib/uefi/edk2/path_utilities.py
+++ b/edk2toollib/uefi/edk2/path_utilities.py
@@ -103,7 +103,7 @@ def GetEdk2RelativePathFromAbsolutePath(self, abspath):
self.logger.error("AbsolutePath: %s" % abspath)
return None
- def GetAbsolutePathOnThisSytemFromEdk2RelativePath(self, relpath):
+ def GetAbsolutePathOnThisSytemFromEdk2RelativePath(self, relpath, log_errors=True):
''' Given a edk2 relative path return an absolute path to the file
in this workspace.
@@ -124,8 +124,9 @@ def GetAbsolutePathOnThisSytemFromEdk2RelativePath(self, relpath):
abspath = os.path.join(a, relpath)
if(os.path.exists(abspath)):
return abspath
- self.logger.error("Failed to convert Edk2Relative Path to an Absolute Path on this system.")
- self.logger.error("Relative Path: %s" % relpath)
+ if log_errors:
+ self.logger.error("Failed to convert Edk2Relative Path to an Absolute Path on this system.")
+ self.logger.error("Relative Path: %s" % relpath)
return None
|
tianocore/edk2-pytool-library
|
ec7d9a9fb1f490bb82301f1a28eac6098fe50aee
|
diff --git a/edk2toollib/uefi/edk2/parsers/base_parser_test.py b/edk2toollib/uefi/edk2/parsers/base_parser_test.py
index 6f3f191..a7857ff 100644
--- a/edk2toollib/uefi/edk2/parsers/base_parser_test.py
+++ b/edk2toollib/uefi/edk2/parsers/base_parser_test.py
@@ -588,26 +588,34 @@ def test_find_path(self):
parser.Lines = ["hello"]
package_paths = ["Common/Test", "SM_MAGIC"]
root_path = tempfile.mkdtemp()
- target_filedir = os.path.join(root_path, "BuildPkg")
- parser.TargetFilePath = target_filedir
- parser.SetPackagePaths(package_paths)
- parser.SetBaseAbsPath(root_path)
- os.makedirs(target_filedir)
index = 0
- root_file = "root.txt"
- target_file = "target.txt"
+ # create the packages path folders
for package in package_paths:
pack_path = os.path.join(root_path, package)
os.makedirs(pack_path)
parser.WriteLinesToFile(os.path.join(pack_path, f"package_{index}.txt"))
index += 1
+ # setup the parser
+ parser.SetBaseAbsPath(root_path)
+ parser.SetPackagePaths(package_paths)
+
+ # create the root and target files
+ root_file = "root.txt"
+ target_file = "target.txt"
+
root_filepath = os.path.join(root_path, root_file)
+ target_filedir = os.path.join(root_path, "BuildPkg")
target_filepath = os.path.join(target_filedir, target_file)
+ # create root file
parser.WriteLinesToFile(root_filepath)
+ # create target file
+ os.makedirs(target_filedir)
parser.WriteLinesToFile(target_filepath)
-
+ parser.TargetFilePath = target_filepath
+ # check if we can find the root
root_found = parser.FindPath(root_file)
self.assertEqual(root_found, root_filepath)
+ # check we can find the target using the target path
target_found = parser.FindPath(target_file)
self.assertEqual(target_found, target_filepath)
@@ -620,8 +628,12 @@ def test_find_path(self):
# invalid files
invalid_filename = "YOU_WONT_FIND_ME.txt"
invalid_file = os.path.join(root_path, invalid_filename)
- invalid_result = parser.FindPath(invalid_filename)
- self.assertEqual(invalid_file, invalid_result)
+ invalid_result = parser.FindPath(invalid_file)
+ invalid_result2 = parser.FindPath(invalid_filename)
+ self.assertEqual(None, invalid_result)
+ self.assertEqual(None, invalid_result2)
+ invalid_result3 = parser.FindPath(None)
+ self.assertEqual(None, invalid_result3)
# make sure we can write out to a file
diff --git a/edk2toollib/uefi/edk2/parsers/dsc_parser_test.py b/edk2toollib/uefi/edk2/parsers/dsc_parser_test.py
index 5d2a124..5e6296c 100644
--- a/edk2toollib/uefi/edk2/parsers/dsc_parser_test.py
+++ b/edk2toollib/uefi/edk2/parsers/dsc_parser_test.py
@@ -30,7 +30,7 @@ def write_to_file(file_path, data):
def test_dsc_include_single_file(self):
''' This tests whether includes work properly '''
- workspace = tempfile.gettempdir()
+ workspace = tempfile.mkdtemp()
file1_name = "file1.dsc"
file2_name = "file2.dsc"
@@ -54,7 +54,7 @@ def test_dsc_include_single_file(self):
def test_dsc_include_missing_file(self):
''' This tests whether includes work properly '''
- workspace = tempfile.gettempdir()
+ workspace = tempfile.mkdtemp()
file1_name = "file1.dsc"
file1_path = os.path.join(workspace, file1_name)
@@ -70,7 +70,7 @@ def test_dsc_include_missing_file(self):
def test_dsc_include_missing_file_no_fail_mode(self):
''' This tests whether includes work properly if no fail mode is on'''
- workspace = tempfile.gettempdir()
+ workspace = tempfile.mkdtemp()
file1_name = "file1.dsc"
file1_path = os.path.join(workspace, file1_name)
@@ -83,3 +83,74 @@ def test_dsc_include_missing_file_no_fail_mode(self):
parser.SetNoFailMode()
parser.SetBaseAbsPath(workspace)
parser.ParseFile(file1_path)
+
+ def test_dsc_parse_file_on_package_path(self):
+        ''' This tests whether a file on a package path can be parsed '''
+ workspace = tempfile.mkdtemp()
+ working_dir_name = "working"
+ working2_dir_name = "working2"
+
+ working_folder = os.path.join(workspace, working_dir_name)
+ working2_folder = os.path.join(working_folder, working2_dir_name)
+ os.makedirs(working_folder, exist_ok=True)
+ os.makedirs(working2_folder, exist_ok=True)
+
+ file1_name = "file1.dsc"
+ file1_path = os.path.join(working2_folder, file1_name)
+ file1_short_path = os.path.join(working2_dir_name, file1_name)
+ file1_data = "[Defines]\n INCLUDED=TRUE"
+
+ TestDscParserIncludes.write_to_file(file1_path, file1_data)
+ with self.assertRaises(FileNotFoundError):
+ parser = DscParser()
+ parser.SetBaseAbsPath(workspace)
+ parser.ParseFile(file1_short_path)
+
+ parser = DscParser()
+ parser.SetBaseAbsPath(workspace)
+ parser.SetPackagePaths([working_folder, ])
+ parser.ParseFile(file1_short_path)
+ self.assertEqual(parser.LocalVars["INCLUDED"], "TRUE") # make sure we got the defines
+
+ def test_dsc_include_relative_path(self):
+ ''' This tests whether includes work properly with a relative path'''
+ workspace = tempfile.mkdtemp()
+ outside_folder = os.path.join(workspace, "outside")
+ inside_folder = os.path.join(outside_folder, "inside")
+ inside2_folder = os.path.join(outside_folder, "inside2")
+ random_folder = os.path.join(outside_folder, "random")
+ os.makedirs(inside_folder, exist_ok=True)
+ os.makedirs(inside2_folder, exist_ok=True)
+ os.makedirs(random_folder, exist_ok=True)
+ cwd = os.getcwd()
+ os.chdir(random_folder)
+ try:
+
+ file1_name = "file1.dsc"
+ file1_path = os.path.join(outside_folder, file1_name)
+
+ file2_name = "file2.dsc"
+ file2_path = os.path.join(inside_folder, file2_name)
+
+ file3_name = "file3.dsc"
+ file3_path = os.path.join(inside2_folder, file3_name)
+
+ file1_data = "!include " + os.path.relpath(file2_path, os.path.dirname(file1_path)).replace("\\", "/")
+ file2_data = "!include " + os.path.relpath(file3_path, os.path.dirname(file2_path)).replace("\\", "/")
+ file3_data = "[Defines]\n INCLUDED=TRUE"
+
+ print(f"{file1_path}: {file1_data}")
+ print(f"{file2_path}: {file2_data}")
+ print(f"{file3_path}: {file3_data}")
+
+ TestDscParserIncludes.write_to_file(file1_path, file1_data)
+ TestDscParserIncludes.write_to_file(file2_path, file2_data)
+ TestDscParserIncludes.write_to_file(file3_path, file3_data)
+
+ parser = DscParser()
+ parser.SetBaseAbsPath(workspace)
+ parser.ParseFile(file1_path)
+
+ self.assertEqual(parser.LocalVars["INCLUDED"], "TRUE") # make sure we got the defines
+ finally:
+ os.chdir(cwd)
|
stuart_ci_build does not support execution from paths other than the workspace root
stuart_ci_build works when I execute it from the workspace root, but fails when I run it from a subdirectory.
For example, the following works from the Mu_Plus repo workspace root:
```mu_plus>stuart_ci_build -c .pytool\CISettings.py -p FirmwarePolicyPkg```
But when I change to a subdirectory:
```mu_plus\FirmwarePolicyPkg>stuart_ci_build -c ..\.pytool\CISettings.py -p FirmwarePolicyPkg```
it fails with
```
ERROR - --->Test Failed: Compiler Plugin DEBUG returned 1
PROGRESS - --Running FirmwarePolicyPkg: Compiler Plugin RELEASE --
CRITICAL - EXCEPTION: [Errno 2] No such file or directory: 'FirmwarePolicyPkg/FirmwarePolicyPkg.dsc'
CRITICAL - Traceback (most recent call last):
File "c:\git\venv_1911\lib\site-packages\edk2toolext\invocables\edk2_ci_build.py", line 226, in Go
tc, plugin_output_stream)
File "C:\git\FwPolicy\fw_policy\MU_BASECORE\.pytool\Plugin\CompilerPlugin\CompilerPlugin.py", line 80, in RunBuildPlugin
dp.ParseFile(AP_Path)
File "c:\git\venv_1911\lib\site-packages\edk2toollib\uefi\edk2\parsers\dsc_parser.py", line 259, in ParseFile
f = open(os.path.join(filepath), "r")
FileNotFoundError: [Errno 2] No such file or directory: 'FirmwarePolicyPkg/FirmwarePolicyPkg.dsc'
ERROR - --->Test Failed: Compiler Plugin RELEASE returned 1
```
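For reference, a simplified sketch of the resolution order the patched `FindPath` follows (the function name and the direct package-path loop are illustrative; the real code delegates package-path lookup to `Edk2Path`):
```
import os

def find_path_sketch(root_path, target_file_path, package_paths, *p):
    """Illustrative resolution order of the patched FindPath."""
    if p is None or (len(p) == 1 and p[0] is None):
        return None
    candidate = os.path.join(*p)
    # 1. An absolute path that exists is returned as-is
    if os.path.isabs(candidate) and os.path.exists(candidate):
        return candidate
    # 2. Relative to the workspace root
    candidate = os.path.join(root_path, *p)
    if os.path.exists(candidate):
        return os.path.abspath(candidate)
    # 3. Relative to the directory of the file being parsed
    if target_file_path is not None:
        candidate = os.path.join(os.path.dirname(target_file_path), *p)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    # 4. Relative to each package path (the real code uses Edk2Path here)
    for pp in package_paths:
        candidate = os.path.join(root_path, pp, *p)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None  # the patched code logs an error and returns None
```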
|
0.0
|
ec7d9a9fb1f490bb82301f1a28eac6098fe50aee
|
[
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserPathAndFile::test_find_path",
"edk2toollib/uefi/edk2/parsers/dsc_parser_test.py::TestDscParserIncludes::test_dsc_include_relative_path",
"edk2toollib/uefi/edk2/parsers/dsc_parser_test.py::TestDscParserIncludes::test_dsc_parse_file_on_package_path"
] |
[
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParser::test_replace_boolean_constants",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParser::test_replace_macro_local_var_priority",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParser::test_replace_macro_using_dollarsign",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_conditional_ifdef",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_conditional_ifndef",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_emulator_conditional_not_in",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_emulator_conditional_not_it_all",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_emulator_conditional_not_or",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_emulator_conditional_or_double_in",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_emulator_conditional_parens_order",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_and_operation_conditional",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_bad_else",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_bad_endif",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_ands_ors",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_false_equals_zero",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_greater_than",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_greater_than_equal",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_hex_number",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_invalid_operators",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_less_than",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_less_than_equal",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_non_numerical",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_not_equals_true_false",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_reset",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_single_boolean",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_true_cannot_be_greater_than",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_true_cannot_be_greater_than_hex",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_true_equals_one",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_true_not_equals_false",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_variables",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_else",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_extra_tokens",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_garbage_input",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_in_conditional",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_invalid_conditional",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_or_operation_conditional",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_elseif",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_ifdef",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_ifdef_dollarsign",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_ifndef",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_ifndef_dollarsign",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_without_resolution",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserGuids::test_is_guid",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserGuids::test_parse_guid",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserVariables::test_replace_input_variables",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserVariables::test_replace_local_variables",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserPathAndFile::test_write_lines",
"edk2toollib/uefi/edk2/parsers/dsc_parser_test.py::TestDscParserBasic::test_creation",
"edk2toollib/uefi/edk2/parsers/dsc_parser_test.py::TestDscParserIncludes::test_dsc_include_missing_file",
"edk2toollib/uefi/edk2/parsers/dsc_parser_test.py::TestDscParserIncludes::test_dsc_include_missing_file_no_fail_mode",
"edk2toollib/uefi/edk2/parsers/dsc_parser_test.py::TestDscParserIncludes::test_dsc_include_single_file"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-06-20 16:29:57+00:00
|
bsd-2-clause
| 5,905 |
|
tianocore__edk2-pytool-library-189
|
diff --git a/edk2toollib/uefi/edk2/path_utilities.py b/edk2toollib/uefi/edk2/path_utilities.py
index 40dbf61..6c51b76 100644
--- a/edk2toollib/uefi/edk2/path_utilities.py
+++ b/edk2toollib/uefi/edk2/path_utilities.py
@@ -245,53 +245,99 @@ def GetContainingPackage(self, InputPath: str) -> str:
return None
- def GetContainingModules(self, InputPath: str) -> list:
- """Find the list of modules (infs) that file path is in.
+ def GetContainingModules(self, input_path: str) -> list[str]:
+ """Find the list of modules (inf files) for a file path.
- for now just assume any inf in the same dir or if none
- then check parent dir. If InputPath is not in the filesystem
- this function will try to return the likely containing module
- but if the entire module has been deleted this isn't possible.
+ Note: This function only accepts absolute paths. An exception will
+ be raised if a non-absolute path is given.
+
+ Note: If input_path does not exist in the filesystem, this function
+ will try to return the likely containing module(s) but if the
+ entire module has been deleted, this isn't possible.
+
+ - If a .inf file is given, that file is returned.
+ - Otherwise, the nearest set of .inf files (in the closest parent)
+ will be returned in a list of file path strings.
Args:
- InputPath (str): file path in the Os spefic path form
+ input_path (str): Absolute path to a file, directory, or module.
+ Supports both Windows and Linux like paths.
Returns:
- (list): list of module inf paths in absolute form.
+ (list[str]): Absolute paths of .inf files that could be the
+ containing module.
"""
- self.logger.debug("GetContainingModules: %s" % InputPath)
-
- # if INF return self
- if fnmatch.fnmatch(InputPath.lower(), '*.inf'):
- return [InputPath]
-
- # Before checking the local filesystem for an INF
- # make sure filesystem has file or at least folder
- if not os.path.isfile(InputPath):
- logging.debug("InputPath doesn't exist in filesystem")
+ input_path = Path(os.path.normcase(input_path))
+ if not input_path.is_absolute():
+ # Todo: Return a more specific exception type when
+ # https://github.com/tianocore/edk2-pytool-library/issues/184 is
+ # implemented.
+ raise Exception("Module path must be absolute.")
+
+ package_paths = [Path(os.path.normcase(x)) for x in self.PackagePathList]
+ workspace_path = Path(os.path.normcase(self.WorkspacePath))
+ all_root_paths = package_paths + [workspace_path]
+
+ # For each root path, find the maximum allowed root in its hierarchy.
+ maximum_root_paths = all_root_paths
+ for root_path in maximum_root_paths:
+ for other_root_path in maximum_root_paths[:]:
+ if root_path == other_root_path:
+ continue
+ if root_path.is_relative_to(other_root_path):
+ if len(root_path.parts) > len(other_root_path.parts):
+ maximum_root_paths.remove(root_path)
+ else:
+ maximum_root_paths.remove(other_root_path)
+
+ # Verify the file path is within a valid workspace or package path
+ # directory.
+ for path in maximum_root_paths:
+ if input_path.is_relative_to(path):
+ break
+ else:
+ return []
modules = []
- # Check current dir
- dirpath = os.path.dirname(InputPath)
- if os.path.isdir(dirpath):
- for f in os.listdir(dirpath):
- if fnmatch.fnmatch(f.lower(), '*.inf'):
- self.logger.debug("Found INF file in %s. INf is: %s", dirpath, f)
- modules.append(os.path.join(dirpath, f))
-
- # if didn't find any in current dir go to parent dir.
- # this handles cases like:
- # ModuleDir/
- # Module.inf
- # x64/
- # file.c
- #
- if (len(modules) == 0):
- dirpath = os.path.dirname(dirpath)
- if os.path.isdir(dirpath):
- for f in os.listdir(dirpath):
- if fnmatch.fnmatch(f.lower(), '*.inf'):
- self.logger.debug("Found INF file in %s. INf is: %s", dirpath, f)
- modules.append(os.path.join(dirpath, f))
+ if input_path.suffix == '.inf':
+ # Return the file path given since it is a module .inf file
+ modules = [str(input_path)]
+
+ if not modules:
+ # Continue to ascend directories up to a maximum root path.
+ #
+ # This handles cases like:
+ # ModuleDir/ | ModuleDir/ | ...similarly nested files
+ # Module.inf | Module.inf |
+ # x64/ | Common/ |
+ # file.c | X64/ |
+ # | file.c |
+ #
+ # The terminating condition of the loop is when a maximum root
+ # path has been reached.
+ #
+ # A maximum root path represents the maximum allowed ascension
+ # point in the input_path directory hierarchy as sub-roots like
+ # a package path pointing under a workspace path are already
+ # accounted for during maximum root path filtering.
+ #
+ # Given a root path is either the workspace or a package path,
+ # neither of which are a module directory, once that point is
+ # reached, all possible module candidates are exhausted.
+ current_dir = input_path.parent
+ while current_dir not in maximum_root_paths:
+ if current_dir.is_dir():
+ current_dir_inf_files = \
+ [str(f) for f in current_dir.iterdir() if
+ f.is_file() and f.suffix.lower() == '.inf']
+ if current_dir_inf_files:
+ # A .inf file(s) exist in this directory.
+ #
+ # Since this is the closest parent that can be considered
+ # a module, return the .inf files as module candidates.
+ modules.extend(current_dir_inf_files)
+ break
+
+ current_dir = current_dir.parent
return modules
|
tianocore/edk2-pytool-library
|
ed32e19c5bf3afd7b1495e68525b9307dc4e0380
|
diff --git a/edk2toollib/uefi/edk2/test_path_utilities.py b/edk2toollib/uefi/edk2/test_path_utilities.py
index 8526760..e46d6e1 100644
--- a/edk2toollib/uefi/edk2/test_path_utilities.py
+++ b/edk2toollib/uefi/edk2/test_path_utilities.py
@@ -13,6 +13,7 @@
import tempfile
import shutil
from edk2toollib.uefi.edk2.path_utilities import Edk2Path
+from pathlib import Path
class PathUtilitiesTest(unittest.TestCase):
@@ -385,6 +386,117 @@ def test_get_containing_package_ws_abs_different_case(self):
p = os.path.join(folder_pp1_abs, "testfile.c")
self.assertIsNone(pathobj.GetContainingPackage(p), folder_pp_rel)
+ def test_get_containing_modules_with_relative_path(self):
+ """Test that a relative path raises an exception.
+
+ Note: GetContainingModules() only accepts absolute paths.
+
+ File layout:
+ root/ <-- Current working directory (self.tmp)
+ folder_ws <-- Workspace directory
+ pp1 <-- Package Path 1
+ PPTestPkg <-- An edk2 package
+ PPTestPkg.DEC
+ module1
+ module1.INF
+ module2
+ module2.INF
+ X64
+ TestFile.c
+ WSTestPkg <-- An edk2 package
+ WSTestPkg.dec
+ module1
+ module1.inf
+ module2
+ module2.inf
+ X64
+ TestFile.c
+ """
+ # Make the workspace directory: folder_ws/
+ ws_rel = "folder_ws"
+ ws_abs = os.path.join(self.tmp, ws_rel)
+ os.mkdir(ws_abs)
+
+ # Make Package Path 1 directory: folder_ws/pp1
+ folder_pp_rel = "pp1"
+ folder_pp1_abs = os.path.join(ws_abs, folder_pp_rel)
+ os.mkdir(folder_pp1_abs)
+
+ # Make WSTestPkg: folder_ws/WSTestPkg
+ ws_p_name = "WSTestPkg"
+ self._make_edk2_package_helper(ws_abs, ws_p_name)
+
+ # Make PPTestPkg in Package Path 1: folder_ws/pp1/PPTestPkg
+ pp_p_name = "PPTestPkg"
+ self._make_edk2_package_helper(folder_pp1_abs, pp_p_name, extension_case_lower=False)
+
+ pathobj = Edk2Path(ws_abs, [folder_pp1_abs])
+
+ # Change the current working directory to the workspace
+ os.chdir(ws_abs)
+
+ # Pass a valid relative path to GetContainingModules()
+ # folder_ws/WSTestPkg/module2/module2.inf
+ p = os.path.join("WSTestPkg", "module2", "module2.inf")
+ self.assertRaises(Exception, pathobj.GetContainingModules, p)
+
+ # Pass an invalid relative path to GetContainingModules()
+ # folder_ws/WSTestPkg/module2/module3.inf
+ p = os.path.join("WSTestPkg", "module2", "module3.inf")
+ self.assertRaises(Exception, pathobj.GetContainingModules, p)
+
+ # Pass a valid non .inf relative path to GetContainingModules()
+ # folder_ws/WSTestPkg/module2/X64/TestFile.c
+ p = os.path.join("WSTestPkg", "module2", "X64", "TestFile.c")
+ self.assertRaises(Exception, pathobj.GetContainingModules, p)
+
+ def test_get_containing_module_with_infs_in_other_temp_dirs(self):
+ ''' test that GetContainingModule does not look outside the workspace
+ root for modules. To do so, a temporary .inf file is placed in the
+ user's temporary directory. Such a file could already exist and
+ similarly impact test results. To ensure consistent test results, this
+ test explicitly creates such a file.
+
+ File layout:
+
+ root/ <-- User temporary directory
+ SomeModule.inf <-- .inf file in user temporary directory
+ <temp_working_dir>/ <-- Current working directory (self.tmp)
+ folder_ws <-- Workspace root
+ WSTestPkg <-- An edk2 package
+ WSTestPkg.dec
+ module1
+ module1.inf
+ module2
+ module2.inf
+ X64
+ TestFile.c
+ '''
+ # Make the workspace directory: <self.tmp>/folder_ws/
+ ws_rel = "folder_ws"
+ ws_abs = os.path.join(self.tmp, ws_rel)
+
+ # Make WSTestPkg: <self.tmp>/folder_ws/WSTestPkg/
+ ws_p_name = "WSTestPkg"
+ self._make_edk2_package_helper(ws_abs, ws_p_name)
+
+ # Place a .inf file in the temporary directory
+ # <Temporary Directory>/SomeModule.inf
+ other_inf = os.path.join(os.path.dirname(self.tmp), "SomeModule.inf")
+ with open(other_inf, 'w'):
+ pass
+
+ try:
+ pathobj = Edk2Path(ws_abs, [])
+
+ # File outside of the workspace - invalid and should return None
+ p = os.path.join(os.path.dirname(ws_abs), "testfile.c")
+ relist = pathobj.GetContainingModules(p)
+ self.assertEqual(len(relist), 0)
+ finally:
+ if os.path.isfile(other_inf):
+ os.remove(other_inf)
+
def test_get_containing_module(self):
''' test basic usage of GetContainingModule with packages path nested
inside the workspace
@@ -432,7 +544,9 @@ def test_get_containing_module(self):
p = os.path.join(ws_pkg_abs, "module1", "testfile.c")
relist = pathobj.GetContainingModules(p)
self.assertEqual(len(relist), 1)
- self.assertIn(os.path.join(ws_pkg_abs, "module1", "module1.inf"), relist)
+ self.assertEqual(
+ Path(os.path.join(ws_pkg_abs, "module1", "module1.inf")),
+ Path(relist[0]))
# file in workspace root - no package- should return ws root
p = os.path.join(ws_abs, "testfile.c")
@@ -448,13 +562,17 @@ def test_get_containing_module(self):
p = os.path.join(ws_pkg_abs, "module2", "X64", "testfile.c")
relist = pathobj.GetContainingModules(p)
self.assertEqual(len(relist), 1)
- self.assertIn(os.path.join(ws_pkg_abs, "module2", "module2.inf"), relist)
+ self.assertEqual(
+ Path(os.path.join(ws_pkg_abs, "module2", "module2.inf")),
+ Path(relist[0]))
# inf file in module2 x64
p = os.path.join(ws_pkg_abs, "module2", "module2.inf")
relist = pathobj.GetContainingModules(p)
self.assertEqual(len(relist), 1)
- self.assertIn(os.path.join(ws_pkg_abs, "module2", "module2.inf"), relist)
+ self.assertEqual(
+ Path(os.path.join(ws_pkg_abs, "module2", "module2.inf")),
+ Path(relist[0]))
# file in PPTestPkg root
p = os.path.join(pp_pkg_abs, "testfile.c")
@@ -465,13 +583,17 @@ def test_get_containing_module(self):
p = os.path.join(pp_pkg_abs, "module1", "testfile.c")
relist = pathobj.GetContainingModules(p)
self.assertEqual(len(relist), 1)
- self.assertIn(os.path.join(pp_pkg_abs, "module1", "module1.INF"), relist)
+ self.assertEqual(
+ Path(os.path.join(pp_pkg_abs, "module1", "module1.INF")),
+ Path(relist[0]))
# inf file in module in PPTestPkg
p = os.path.join(pp_pkg_abs, "module1", "module1.INF")
relist = pathobj.GetContainingModules(p)
self.assertEqual(len(relist), 1)
- self.assertIn(os.path.join(pp_pkg_abs, "module1", "module1.INF"), relist)
+ self.assertEqual(
+ Path(os.path.join(pp_pkg_abs, "module1", "module1.INF")),
+ Path(relist[0]))
# file in packages path root - no module
p = os.path.join(folder_pp1_abs, "testfile.c")
@@ -493,7 +615,9 @@ def test_get_containing_module(self):
p = os.path.join(ws_pkg_abs, "module1", "ThisParentDirDoesNotExist", "testfile.c")
relist = pathobj.GetContainingModules(p)
self.assertEqual(len(relist), 1)
- self.assertIn(os.path.join(ws_pkg_abs, "module1", "module1.inf"), relist)
+ self.assertEqual(
+ Path(os.path.join(ws_pkg_abs, "module1", "module1.inf")),
+ Path(relist[0]))
def test_get_edk2_relative_path_from_absolute_path(self):
''' test basic usage of GetEdk2RelativePathFromAbsolutePath with packages path nested
|
[Bug]: Edk2Path.GetContainingModules() can search beyond max limit
### Contact Details
_No response_
### Describe the Bug
The function goes up two parent levels without checking whether it has passed the workspace root and all package roots. This means it will scan a directory outside the allowed search areas for a .inf file that would represent the containing module.
To exemplify the problem, `test_get_containing_module()` will fail if a .inf file is in the user's temporary directory.
https://github.com/tianocore/edk2-pytool-library/blob/master/edk2toollib/uefi/edk2/path_utilities_test.py#L388
### Reproduction steps
Run `test_get_containing_module()` with a .inf file in your temporary directory.
### Expected behavior
`Edk2Path.GetContainingModules()` does not include invalid files in its search.
### Execution Environment
_No response_
### Pip packages
_No response_
### Additional context
_No response_
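For reference, a simplified sketch of the bounded upward search the fix introduces (the function name is illustrative, and it assumes `input_path` has already been verified to live under one of `root_paths`, as the patch does before its loop):
```
from pathlib import Path

def find_containing_infs(input_path, root_paths):
    """Ascend from input_path's directory, stopping at any root path.

    root_paths: list of pathlib.Path workspace/package-path roots.
    """
    input_path = Path(input_path)
    if input_path.suffix.lower() == ".inf":
        return [str(input_path)]
    current_dir = input_path.parent
    while current_dir not in root_paths:
        if current_dir.is_dir():
            infs = [str(f) for f in current_dir.iterdir()
                    if f.is_file() and f.suffix.lower() == ".inf"]
            if infs:
                return infs  # closest parent holding .inf files wins
        current_dir = current_dir.parent
    return []  # hit a workspace/package-path root without finding a module
```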
|
0.0
|
ed32e19c5bf3afd7b1495e68525b9307dc4e0380
|
[
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_containing_module_with_infs_in_other_temp_dirs",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_containing_modules_with_relative_path"
] |
[
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_basic_init_ws_abs",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_basic_init_ws_cwd",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_absolute_path_on_this_system_from_edk2_relative_path",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_absolute_path_then_relative_path_when_path_contains_repeated_packagepath_name",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_containing_module",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_containing_package_inside_workspace",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_containing_package_outside_workspace",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_edk2_relative_path_from_absolute_path",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_folder_is_next_to_package",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_package_is_not_directly_inside_packages_path",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_package_path_inside_package",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_packages_path_list_contains_substrings",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_path_does_not_exist",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_with_nested_packages",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_nonexistant_abs",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_nonexistant_ws",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_pp_inside_workspace",
"edk2toollib/uefi/edk2/test_path_utilities.py::PathUtilitiesTest::test_pp_outside_workspace"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-07 13:45:24+00:00
|
bsd-2-clause
| 5,906 |
|
tianocore__edk2-pytool-library-238
|
diff --git a/.cspell.json b/.cspell.json
index 00e3e94..2d2eb60 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -12,8 +12,7 @@
"*.exe"
],
"allowCompoundWords": true,
- "ignoreWords": [
- ],
+ "ignoreWords": [],
"words": [
"isfile",
"isclass",
@@ -68,7 +67,9 @@
"dirid",
"nologo",
"rsassa",
- "fixedpcd", "featurepcd", "patchpcd",
+ "fixedpcd",
+ "featurepcd",
+ "patchpcd",
"edkii",
"levelno",
"localizable",
@@ -93,6 +94,8 @@
"markdownlint",
"codecov",
"nonlocalizable",
- "mkdocs"
+ "mkdocs",
+ "DBXFILE",
+ "decodefs"
]
-}
+}
\ No newline at end of file
diff --git a/edk2toollib/uefi/authenticated_variables_structure_support.py b/edk2toollib/uefi/authenticated_variables_structure_support.py
index 123a09f..f5e311a 100644
--- a/edk2toollib/uefi/authenticated_variables_structure_support.py
+++ b/edk2toollib/uefi/authenticated_variables_structure_support.py
@@ -799,7 +799,7 @@ def PopulateFromFileStream(self, fs):
Daylight = struct.unpack("<B", fs.read(1))[0]
fs.seek(1, 1) # seek past pad2
- self.Time = datetime.datetime(Year, Month, Day, Hour, Minute, Second, NanoSecond / 1000)
+ self.Time = datetime.datetime(Year, Month, Day, Hour, Minute, Second, NanoSecond // 1000)
logging.debug("I don't know how to deal with TimeZone or Daylight and I don't care at the moment")
logging.debug("Timezone value is: 0x%x" % TimeZone)
logging.debug("Daylight value is: 0x%X" % Daylight)
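For reference, the one-character change above (`/` to `//`) matters because Python 3's `/` is true division and returns a float, while `datetime.datetime` only accepts an integer microsecond argument:
```
import datetime

nanoseconds = 500000000
# nanoseconds / 1000 -> 500000.0 (float): datetime.datetime raises
#   TypeError: 'float' object cannot be interpreted as an integer
# nanoseconds // 1000 -> 500000 (int): accepted as microseconds
print(datetime.datetime(2023, 1, 30, 0, 0, 0, nanoseconds // 1000))
```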
|
tianocore/edk2-pytool-library
|
4bd867b57a0a959840e814ab006d172b16fca018
|
diff --git a/edk2toollib/tests/test_authenticated_variables_structure_support.py b/edk2toollib/tests/test_authenticated_variables_structure_support.py
index 55ed5ea..837685d 100644
--- a/edk2toollib/tests/test_authenticated_variables_structure_support.py
+++ b/edk2toollib/tests/test_authenticated_variables_structure_support.py
@@ -13,7 +13,7 @@
import io
from edk2toollib.uefi.authenticated_variables_structure_support import \
EfiSignatureDatabase, EfiSignatureList, EfiSignatureDataEfiCertSha256,\
- EfiSignatureDataEfiCertX509, EfiSignatureDataFactory
+ EfiSignatureDataEfiCertX509, EfiSignatureDataFactory, EFiVariableAuthentication2
# import logging
VERBOSE = False
@@ -71,6 +71,870 @@
'9999999999999999999999999999999999999999999999999999999999999999'
]
+# DBXFILE contains Dbx contents downloaded from UEFI.org's Revocation List on 2023.01.30 (x64)
+DBXFILE = 'da070306131115000000000000000000'\
+ 'f60c00000002f10e9dd2af4adf68ee49'\
+ '8aa9347d375665a730820cda02010131'\
+ '0f300d06096086480165030402010500'\
+ '300b06092a864886f70d010701a0820a'\
+ 'ed308204fd308203e5a0030201020213'\
+ '330000002596d20c5c53120043000000'\
+ '000025300d06092a864886f70d01010b'\
+ '0500308180310b300906035504061302'\
+ '5553311330110603550408130a576173'\
+ '68696e67746f6e3110300e0603550407'\
+ '13075265646d6f6e64311e301c060355'\
+ '040a13154d6963726f736f667420436f'\
+ '72706f726174696f6e312a3028060355'\
+ '040313214d6963726f736f667420436f'\
+ '72706f726174696f6e204b454b204341'\
+ '2032303131301e170d32313039303231'\
+ '38323433315a170d3232303930313138'\
+ '323433315a308186310b300906035504'\
+ '0613025553311330110603550408130a'\
+ '57617368696e67746f6e3110300e0603'\
+ '55040713075265646d6f6e64311e301c'\
+ '060355040a13154d6963726f736f6674'\
+ '20436f72706f726174696f6e3130302e'\
+ '060355040313274d6963726f736f6674'\
+ '2057696e646f77732055454649204b65'\
+ '792045786368616e6765204b65793082'\
+ '0122300d06092a864886f70d01010105'\
+ '000382010f003082010a0282010100ca'\
+ 'be8aef9b0f8692546db68e66e1585f14'\
+ 'ebebe6eed227c8fa323674b22e24efba'\
+ 'fa169e7a5fa4c2e6f252076ab245a4f8'\
+ 'cf358d0d9c618cafae8ad1807cd50c19'\
+ '23a741a3525a4e32e78d21b83b063517'\
+ '03349de5635e77b2ac3e502fb7231c59'\
+ 'e3f3c977139eeb65e44324f6cf04701f'\
+ 'd5962995d0f573012769eea20942a7be'\
+ 'c1f7e67d4b2e6ed8a958a98e4043aa6f'\
+ '620b676becebab447f6ecbf174fda343'\
+ 'ba10cbfc1e05c2d652d3a139626f4a18'\
+ 'e7f8bf3fa412f63fc13af332040049d2'\
+ 'b26ea270c5cf914f56661de0d54ce3bc'\
+ '3399e01e016190a03dc8826a87b032e0'\
+ '1526dadd767ef07c0ba72953d021115f'\
+ '87264bf21c6cb8dbe6c7f067087cb702'\
+ '03010001a38201663082016230140603'\
+ '551d25040d300b06092b060104018237'\
+ '4f01301d0603551d0e04160414753d9c'\
+ 'ab3265b73cafcc22f9b16ba1e38dcf8d'\
+ '6130450603551d11043e303ca43a3038'\
+ '311e301c060355040b13154d6963726f'\
+ '736f667420436f72706f726174696f6e'\
+ '311630140603550405130d3232393936'\
+ '312b343637353933301f0603551d2304'\
+ '183016801462fc43cda03ea4cb6712d2'\
+ '5bd955ac7bccb68a5f30530603551d1f'\
+ '044c304a3048a046a044864268747470'\
+ '3a2f2f7777772e6d6963726f736f6674'\
+ '2e636f6d2f706b696f70732f63726c2f'\
+ '4d6963436f724b454b4341323031315f'\
+ '323031312d30362d32342e63726c3060'\
+ '06082b06010505070101045430523050'\
+ '06082b06010505073002864468747470'\
+ '3a2f2f7777772e6d6963726f736f6674'\
+ '2e636f6d2f706b696f70732f63657274'\
+ '732f4d6963436f724b454b4341323031'\
+ '315f323031312d30362d32342e637274'\
+ '300c0603551d130101ff04023000300d'\
+ '06092a864886f70d01010b0500038201'\
+ '010069662ba664b30e7f47e3308e9cbf'\
+ '0f6c1250bd8eecd227c8a223c5ced041'\
+ 'a35bef9d8aa41a31ceee757163a15d4f'\
+ 'dc921518daa170873922e85324a6d2a7'\
+ 'c2a74b483a488f7ef0262324568d6391'\
+ '4a9d66915ee9f6a4c1c0f07592689c16'\
+ '833a87006ce0f650b15314b0f4f3bb0b'\
+ '3d623db77a4d526e8dc1d9358f2a3a6a'\
+ '76661a41677f4d00a2979e8493e27106'\
+ 'e3b5414d11dc91d0c0dbe83ea3b18895'\
+ '9b48889350293669c8bf9315b34d2aae'\
+ '0f64b78e36051c341cde7d77ec06aa47'\
+ 'c6aa35f0bf6f36455fbc17c431b6048b'\
+ 'c2003c88b48f04b352da0fbef900a2f2'\
+ '0262d867a54d4dd841605e1d4b0024d4'\
+ '6f6c79cd4fd5b3c022107ac921ed71e0'\
+ '8a59308205e8308203d0a00302010202'\
+ '0a610ad188000000000003300d06092a'\
+ '864886f70d01010b0500308191310b30'\
+ '09060355040613025553311330110603'\
+ '550408130a57617368696e67746f6e31'\
+ '10300e060355040713075265646d6f6e'\
+ '64311e301c060355040a13154d696372'\
+ '6f736f667420436f72706f726174696f'\
+ '6e313b3039060355040313324d696372'\
+ '6f736f667420436f72706f726174696f'\
+ '6e205468697264205061727479204d61'\
+ '726b6574706c61636520526f6f74301e'\
+ '170d3131303632343230343132395a17'\
+ '0d3236303632343230353132395a3081'\
+ '80310b30090603550406130255533113'\
+ '30110603550408130a57617368696e67'\
+ '746f6e3110300e060355040713075265'\
+ '646d6f6e64311e301c060355040a1315'\
+ '4d6963726f736f667420436f72706f72'\
+ '6174696f6e312a302806035504031321'\
+ '4d6963726f736f667420436f72706f72'\
+ '6174696f6e204b454b20434120323031'\
+ '3130820122300d06092a864886f70d01'\
+ '010105000382010f003082010a028201'\
+ '0100c4e8b58abfad5726b026c3eae7fb'\
+ '577a44025d070dda4ae5742ae6b00fec'\
+ '6debec7fb9e35a63327c11174f0ee30b'\
+ 'a73815938ec6f5e084b19a9b2ce7f5b7'\
+ '91d609e1e2c004a8ac301cdf48f30650'\
+ '9a64a7517fc8854f8f2086cefe2fe19f'\
+ 'ff82c0ede9cdcef4536a623a0b43b9e2'\
+ '25fdfe05f9d4c414ab11e223898d70b7'\
+ 'a41d4decaee59cfa16c2d7c1cbd4e8c4'\
+ '2fe599ee248b03ec8df28beac34afb43'\
+ '11120b7eb547926cdce60489ebf53304'\
+ 'eb10012a71e5f983133cff25092f6876'\
+ '46ffba4fbedcad712a58aafb0ed2793d'\
+ 'e49b653bcc292a9ffc7259a2ebae92ef'\
+ 'f6351380c602ece45fcc9d76cdef6392'\
+ 'c1af79408479877fe352a8e89d7b0769'\
+ '8f150203010001a382014f3082014b30'\
+ '1006092b060104018237150104030201'\
+ '00301d0603551d0e0416041462fc43cd'\
+ 'a03ea4cb6712d25bd955ac7bccb68a5f'\
+ '301906092b0601040182371402040c1e'\
+ '0a00530075006200430041300b060355'\
+ '1d0f040403020186300f0603551d1301'\
+ '01ff040530030101ff301f0603551d23'\
+ '04183016801445665243e17e5811bfd6'\
+ '4e9e2355083b3a226aa8305c0603551d'\
+ '1f045530533051a04fa04d864b687474'\
+ '703a2f2f63726c2e6d6963726f736f66'\
+ '742e636f6d2f706b692f63726c2f7072'\
+ '6f64756374732f4d6963436f72546869'\
+ '5061724d6172526f6f5f323031302d31'\
+ '302d30352e63726c306006082b060105'\
+ '0507010104543052305006082b060105'\
+ '050730028644687474703a2f2f777777'\
+ '2e6d6963726f736f66742e636f6d2f70'\
+ '6b692f63657274732f4d6963436f7254'\
+ '68695061724d6172526f6f5f32303130'\
+ '2d31302d30352e637274300d06092a86'\
+ '4886f70d01010b05000382020100d484'\
+ '88f514941802ca2a3cfb2a921c0cd7a0'\
+ 'd1f1e85266a8eea2b5757a9000aa2da4'\
+ '765aea79b7b9376a517b1064f6e164f2'\
+ '0267bef7a81b78bdbace8858640cd657'\
+ 'c819a35f05d6dbc6d069ce484b32b7eb'\
+ '5dd230f5c0f5b8ba7807a32bfe9bdb34'\
+ '5684ec82caae4125709c6be9fe900fd7'\
+ '961fe5e7941fb22a0c8d4bff2829107b'\
+ 'f7d77ca5d176b905c879ed0f90929cc2'\
+ 'fedf6f7e6c0f7bd4c145dd345196390f'\
+ 'e55e56d8180596f407a642b3a077fd08'\
+ '19f27156cc9f8623a487cba6fd587ed4'\
+ '696715917e81f27f13e50d8b8a3c8784'\
+ 'ebe3cebd43e5ad2d84938e6a2b5a7c44'\
+ 'fa52aa81c82d1cbbe052df0011f89a3d'\
+ 'c160b0e133b5a388d165190a1ae7ac7c'\
+ 'a4c182874e38b12f0dc514876ffd8d2e'\
+ 'bc39b6e7e6c3e0e4cd2784ef9442ef29'\
+ '8b9046413b811b67d8f9435965cb0dbc'\
+ 'fd00924ff4753ba7a924fc50414079e0'\
+ '2d4f0a6a27766e52ed96697baf0ff787'\
+ '05d045c2ad5314811ffb3004aa373661'\
+ 'da4a691b34d868edd602cf6c940cd3cf'\
+ '6c2279adb1f0bc03a24660a9c407c221'\
+ '82f1fdf2e8793260bfd8aca522144bca'\
+ 'c1d84beb7d3f5735b2e64f75b4b06003'\
+ '2253ae91791dd69b411f15865470b2de'\
+ '0d350f7cb03472ba97603bf079eba2b2'\
+ '1c5da216b887c5e91bf6b597256f389f'\
+ 'e391fa8a7998c3690eb7a31c200597f8'\
+ 'ca14ae00d7c4f3c01410756b34a01bb5'\
+ '9960f35cb0c5574e36d23284bf9e3182'\
+ '01c4308201c002010130819830818031'\
+ '0b300906035504061302555331133011'\
+ '0603550408130a57617368696e67746f'\
+ '6e3110300e060355040713075265646d'\
+ '6f6e64311e301c060355040a13154d69'\
+ '63726f736f667420436f72706f726174'\
+ '696f6e312a3028060355040313214d69'\
+ '63726f736f667420436f72706f726174'\
+ '696f6e204b454b204341203230313102'\
+ '13330000002596d20c5c531200430000'\
+ '00000025300d06096086480165030402'\
+ '010500300d06092a864886f70d010101'\
+ '05000482010002fcf6d3fb1c3e7af2bf'\
+ 'f3a028b0d2ddace519e9d3bf9eaccc0b'\
+ '89e69ba782e42a6b7b723cbca7f93130'\
+ 'b79053db51f5c9e30b0607cb6144170a'\
+ '1e2053ba1edd1553c44cf694c094e594'\
+ 'e904d341d157789acebbf327b52a9027'\
+ '113195403625a47137dc8d779414d3cf'\
+ '434d0077d61de393a960f1525bbb8c90'\
+ 'b6db9ffa1763bffc50e9f89a33c8a58f'\
+ 'c3af6a6174d18ee42b60b32ed0edf014'\
+ '124e0cb65943b573b72b9c36b837b686'\
+ 'f1026874428c6b61cb6fec305fd51dde'\
+ '53b32ead7575d8a9b8de4079d6476b16'\
+ 'db7d1b029ad59f76a1ed7b532d05e508'\
+ '70eb873cd86c843b19a9e2ae412ad0f1'\
+ 'c768802626098dc8e1513529e6c7e062'\
+ '801e1ee7daa02616c4c14c509240aca9'\
+ '41f936934328cc280000000000003000'\
+ '0000bd9afa775903324dbd6028f4e78f'\
+ '784b80b4d96931bf0d02fd91a61e19d1'\
+ '4f1da452e66db2408ca8604d411f9265'\
+ '9f0abd9afa775903324dbd6028f4e78f'\
+ '784bf52f83a3fa9cfbd6920f722824db'\
+ 'e4034534d25b8507246b3b957dac6e1b'\
+ 'ce7abd9afa775903324dbd6028f4e78f'\
+ '784bc5d9d8a186e2c82d09afaa2a6f7f'\
+ '2e73870d3e64f72c4e08ef67796a840f'\
+ '0fbdbd9afa775903324dbd6028f4e78f'\
+ '784b1aec84b84b6c65a51220a9be7181'\
+ '965230210d62d6d33c48999c6b295a2b'\
+ '0a06bd9afa775903324dbd6028f4e78f'\
+ '784bc3a99a460da464a057c3586d83ce'\
+ 'f5f4ae08b7103979ed8932742df0ed53'\
+ '0c66bd9afa775903324dbd6028f4e78f'\
+ '784b58fb941aef95a25943b3fb5f2510'\
+ 'a0df3fe44c58c95e0ab80487297568ab'\
+ '9771bd9afa775903324dbd6028f4e78f'\
+ '784b5391c3a2fb112102a6aa1edc25ae'\
+ '77e19f5d6f09cd09eeb2509922bfcd59'\
+ '92eabd9afa775903324dbd6028f4e78f'\
+ '784bd626157e1d6a718bc124ab8da27c'\
+ 'bb65072ca03a7b6b257dbdcbbd60f65e'\
+ 'f3d1bd9afa775903324dbd6028f4e78f'\
+ '784bd063ec28f67eba53f1642dbf7dff'\
+ '33c6a32add869f6013fe162e2c32f1cb'\
+ 'e56dbd9afa775903324dbd6028f4e78f'\
+ '784b29c6eb52b43c3aa18b2cd8ed6ea8'\
+ '607cef3cfae1bafe1165755cf2e61484'\
+ '4a44bd9afa775903324dbd6028f4e78f'\
+ '784b90fbe70e69d633408d3e170c6832'\
+ 'dbb2d209e0272527dfb63d49d29572a6'\
+ 'f44cbd9afa775903324dbd6028f4e78f'\
+ '784b106faceacfecfd4e303b74f480a0'\
+ '8098e2d0802b936f8ec774ce21f31686'\
+ '689cbd9afa775903324dbd6028f4e78f'\
+ '784b174e3a0b5b43c6a607bbd3404f05'\
+ '341e3dcf396267ce94f8b50e2e23a9da'\
+ '920cbd9afa775903324dbd6028f4e78f'\
+ '784b2b99cf26422e92fe365fbf4bc30d'\
+ '27086c9ee14b7a6fff44fb2f6b900169'\
+ '9939bd9afa775903324dbd6028f4e78f'\
+ '784b2e70916786a6f773511fa7181fab'\
+ '0f1d70b557c6322ea923b2a8d3b92b51'\
+ 'af7dbd9afa775903324dbd6028f4e78f'\
+ '784b3fce9b9fdf3ef09d5452b0f95ee4'\
+ '81c2b7f06d743a737971558e70136ace'\
+ '3e73bd9afa775903324dbd6028f4e78f'\
+ '784b47cc086127e2069a86e03a6bef2c'\
+ 'd410f8c55a6d6bdb362168c31b2ce32a'\
+ '5adfbd9afa775903324dbd6028f4e78f'\
+ '784b71f2906fd222497e54a34662ab24'\
+ '97fcc81020770ff51368e9e3d9bfcbfd'\
+ '6375bd9afa775903324dbd6028f4e78f'\
+ '784b82db3bceb4f60843ce9d97c3d187'\
+ 'cd9b5941cd3de8100e586f2bda563757'\
+ '5f67bd9afa775903324dbd6028f4e78f'\
+ '784b8ad64859f195b5f58dafaa940b6a'\
+ '6167acd67a886e8f469364177221c559'\
+ '45b9bd9afa775903324dbd6028f4e78f'\
+ '784b8d8ea289cfe70a1c07ab7365cb28'\
+ 'ee51edd33cf2506de888fbadd60ebf80'\
+ '481cbd9afa775903324dbd6028f4e78f'\
+ '784baeebae3151271273ed95aa2e6711'\
+ '39ed31a98567303a332298f83709a9d5'\
+ '5aa1bd9afa775903324dbd6028f4e78f'\
+ '784bc409bdac4775add8db92aa22b5b7'\
+ '18fb8c94a1462c1fe9a416b95d8a3388'\
+ 'c2fcbd9afa775903324dbd6028f4e78f'\
+ '784bc617c1a8b1ee2a811c28b5a81b4c'\
+ '83d7c98b5b0c27281d610207ebe692c2'\
+ '967fbd9afa775903324dbd6028f4e78f'\
+ '784bc90f336617b8e7f983975413c997'\
+ 'f10b73eb267fd8a10cb9e3bdbfc667ab'\
+ 'db8bbd9afa775903324dbd6028f4e78f'\
+ '784b64575bd912789a2e14ad56f6341f'\
+ '52af6bf80cf94400785975e9f04e2d64'\
+ 'd745bd9afa775903324dbd6028f4e78f'\
+ '784b45c7c8ae750acfbb48fc37527d64'\
+ '12dd644daed8913ccd8a24c94d856967'\
+ 'df8ebd9afa775903324dbd6028f4e78f'\
+ '784b81d8fb4c9e2e7a8225656b4b8273'\
+ 'b7cba4b03ef2e9eb20e0a0291624eca1'\
+ 'ba86bd9afa775903324dbd6028f4e78f'\
+ '784bb92af298dc08049b78c77492d655'\
+ '1b710cd72aada3d77be54609e43278ef'\
+ '6e4dbd9afa775903324dbd6028f4e78f'\
+ '784be19dae83c02e6f281358d4ebd11d'\
+ '7723b4f5ea0e357907d5443decc5f93c'\
+ '1e9dbd9afa775903324dbd6028f4e78f'\
+ '784b39dbc2288ef44b5f95332cb777e3'\
+ '1103e840dba680634aa806f5c9b10006'\
+ '1802bd9afa775903324dbd6028f4e78f'\
+ '784b32f5940ca29dd812a2c145e6fc89'\
+ '646628ffcc7c7a42cae512337d8d29c4'\
+ '0bbdbd9afa775903324dbd6028f4e78f'\
+ '784b10d45fcba396aef3153ee8f6ecae'\
+ '58afe8476a280a2026fc71f6217dcf49'\
+ 'ba2fbd9afa775903324dbd6028f4e78f'\
+ '784b4b8668a5d465bcdd9000aa8dfcff'\
+ '42044fcbd0aece32fc7011a83e9160e8'\
+ '9f09bd9afa775903324dbd6028f4e78f'\
+ '784b89f3d1f6e485c334cd059d0995e3'\
+ 'cdfdc00571b1849854847a44dc5548e2'\
+ 'dcfbbd9afa775903324dbd6028f4e78f'\
+ '784bc9ec350406f26e559affb4030de2'\
+ 'ebde5435054c35a998605b8fcf04972d'\
+ '8d55bd9afa775903324dbd6028f4e78f'\
+ '784bb3e506340fbf6b5786973393079f'\
+ '24b66ba46507e35e911db0362a2acde9'\
+ '7049bd9afa775903324dbd6028f4e78f'\
+ '784b9f1863ed5717c394b42ef10a6607'\
+ 'b144a65ba11fb6579df94b8eb2f0c4cd'\
+ '60c1bd9afa775903324dbd6028f4e78f'\
+ '784bdd59af56084406e38c63fbe0850f'\
+ '30a0cd1277462a2192590fb05bc259e6'\
+ '1273bd9afa775903324dbd6028f4e78f'\
+ '784bdbaf9e056d3d5b38b68553304abc'\
+ '88827ebc00f80cb9c7e197cdbc5822cd'\
+ '316cbd9afa775903324dbd6028f4e78f'\
+ '784b65f3c0a01b8402d362b9722e98f7'\
+ '5e5e991e6c186e934f7b2b2e6be6dec8'\
+ '00ecbd9afa775903324dbd6028f4e78f'\
+ '784b5b248e913d71853d3da5aedd8d9a'\
+ '4bc57a917126573817fb5fcb2d86a2f1'\
+ 'c886bd9afa775903324dbd6028f4e78f'\
+ '784b2679650fe341f2cf1ea883460b35'\
+ '56aaaf77a70d6b8dc484c9301d1b746c'\
+ 'f7b5bd9afa775903324dbd6028f4e78f'\
+ '784bbb1dd16d530008636f232303a7a8'\
+ '6f3dff969f848815c0574b12c2d787fe'\
+ 'c93fbd9afa775903324dbd6028f4e78f'\
+ '784b0ce02100f67c7ef85f4eed368f02'\
+ 'bf7092380a3c23ca91fd7f19430d94b0'\
+ '0c19bd9afa775903324dbd6028f4e78f'\
+ '784b95049f0e4137c790b0d2767195e5'\
+ '6f73807d123adcf8f6e7bf2d4d991d30'\
+ '5f89bd9afa775903324dbd6028f4e78f'\
+ '784b02e6216acaef6401401fa555ecbe'\
+ 'd940b1a5f2569aed92956137ae58482e'\
+ 'f1b7bd9afa775903324dbd6028f4e78f'\
+ '784b6efefe0b5b01478b7b944c10d3a8'\
+ 'aca2cca4208888e2059f8a06cb5824d7'\
+ 'bab0bd9afa775903324dbd6028f4e78f'\
+ '784b9d00ae4cd47a41c783dc48f342c0'\
+ '76c2c16f3413f4d2df50d181ca3bb5ad'\
+ '859dbd9afa775903324dbd6028f4e78f'\
+ '784bd8d4e6ddf6e42d74a6a536ea62fd'\
+ '1217e4290b145c9e5c3695a31b42efb5'\
+ 'f5a4bd9afa775903324dbd6028f4e78f'\
+ '784bf277af4f9bdc918ae89fa35cc1b3'\
+ '4e34984c04ae9765322c3cb049574d36'\
+ '509cbd9afa775903324dbd6028f4e78f'\
+ '784b0dc24c75eb1aef56b9f13ab9de60'\
+ 'e2eca1c4510034e290bbb36cf60a549b'\
+ '234cbd9afa775903324dbd6028f4e78f'\
+ '784b835881f2a5572d7059b5c8635018'\
+ '552892e945626f115fc9ca07acf7bde8'\
+ '57a4bd9afa775903324dbd6028f4e78f'\
+ '784bbadff5e4f0fea711701ca8fb22e4'\
+ 'c43821e31e210cf52d1d4f74dd50f1d0'\
+ '39bcbd9afa775903324dbd6028f4e78f'\
+ '784bc452ab846073df5ace25cca64d6b'\
+ '7a09d906308a1a65eb5240e3c4ebcaa9'\
+ 'cc0cbd9afa775903324dbd6028f4e78f'\
+ '784bf1863ec8b7f43f94ad14fb0b8b4a'\
+ '69497a8c65ecbc2a55e0bb420e772b8c'\
+ 'dc91bd9afa775903324dbd6028f4e78f'\
+ '784b7bc9cb5463ce0f011fb5085eb8ba'\
+ '77d1acd283c43f4a57603cc113f22ceb'\
+ 'c579bd9afa775903324dbd6028f4e78f'\
+ '784be800395dbe0e045781e8005178b4'\
+ 'baf5a257f06e159121a67c595f6ae225'\
+ '06fdbd9afa775903324dbd6028f4e78f'\
+ '784b1cb4dccaf2c812cfa7b4938e1371'\
+ 'fe2b96910fe407216fd95428672d6c7e'\
+ '7316bd9afa775903324dbd6028f4e78f'\
+ '784b3ece27cbb3ec4438cce523b927c4'\
+ 'f05fdc5c593a3766db984c5e437a3ff6'\
+ 'a16bbd9afa775903324dbd6028f4e78f'\
+ '784b68ee4632c7be1c66c83e89dd93ea'\
+ 'ee1294159abf45b4c2c72d7dc7499aa2'\
+ 'a043bd9afa775903324dbd6028f4e78f'\
+ '784be24b315a551671483d8b9073b32d'\
+ 'e11b4de1eb2eab211afd2d9c319ff55e'\
+ '08d0bd9afa775903324dbd6028f4e78f'\
+ '784be7c20b3ab481ec885501eca52937'\
+ '81d84b5a1ac24f88266b5270e7ecb4aa'\
+ '2538bd9afa775903324dbd6028f4e78f'\
+ '784bdccc3ce1c00ee4b0b10487d372a0'\
+ 'fa47f5c26f57a359be7b27801e144eac'\
+ 'bac4bd9afa775903324dbd6028f4e78f'\
+ '784b0257ff710f2a16e489b37493c076'\
+ '04a7cda96129d8a8fd68d2b6af633904'\
+ '315dbd9afa775903324dbd6028f4e78f'\
+ '784b3a91f0f9e5287fa2994c7d930b2c'\
+ '1a5ee14ce8e1c8304ae495adc58cc445'\
+ '3c0cbd9afa775903324dbd6028f4e78f'\
+ '784b495300790e6c9bf2510daba59db3'\
+ 'd57e9d2b85d7d7640434ec75baa3851c'\
+ '74e5bd9afa775903324dbd6028f4e78f'\
+ '784b81a8b2c9751aeb1faba7dbde5ee9'\
+ '691dc0eaee2a31c38b1491a8146756a6'\
+ 'b770bd9afa775903324dbd6028f4e78f'\
+ '784b8e53efdc15f852cee5a6e92931bc'\
+ '42e6163cd30ff649cca7e87252c3a459'\
+ '960bbd9afa775903324dbd6028f4e78f'\
+ '784b992d359aa7a5f789d268b94c11b9'\
+ '485a6b1ce64362b0edb4441ccc187c39'\
+ '647bbd9afa775903324dbd6028f4e78f'\
+ '784b9fa4d5023fd43ecaff4200ba7e8d'\
+ '4353259d2b7e5e72b5096eff8027d66d'\
+ '1043bd9afa775903324dbd6028f4e78f'\
+ '784bd372c0d0f4fdc9f52e9e1f23fc56'\
+ 'ee72414a17f350d0cea6c26a35a6c321'\
+ '7a13bd9afa775903324dbd6028f4e78f'\
+ '784b5c5805196a85e93789457017d4f9'\
+ 'eb6828b97c41cb9ba6d3dc1fcc115f52'\
+ '7a55bd9afa775903324dbd6028f4e78f'\
+ '784b03f64a29948a88beffdb035e0b09'\
+ 'a7370ccf0cd9ce6bcf8e640c2107318f'\
+ 'ab87bd9afa775903324dbd6028f4e78f'\
+ '784b05d87e15713454616f5b0ed7849a'\
+ 'b5c1712ab84f02349478ec2a38f970c0'\
+ '1489bd9afa775903324dbd6028f4e78f'\
+ '784b06eb5badd26e4fae65f9a42358de'\
+ 'ef7c18e52cc05fbb7fc76776e69d1b98'\
+ '2a14bd9afa775903324dbd6028f4e78f'\
+ '784b08bb2289e9e91b4d20ff3f156251'\
+ '6ab07e979b2c6cefe2ab70c6dfc1199f'\
+ '8da5bd9afa775903324dbd6028f4e78f'\
+ '784b0928f0408bf725e61d67d87138a8'\
+ 'eebc52962d2847f16e3587163b160e41'\
+ 'b6adbd9afa775903324dbd6028f4e78f'\
+ '784b09f98aa90f85198c0d73f89ba77e'\
+ '87ec6f596c491350fb8f8bba80a62fbb'\
+ '914bbd9afa775903324dbd6028f4e78f'\
+ '784b0a75ea0b1d70eaa4d3f374246db5'\
+ '4fc7b43e7f596a353309b9c36b4fd975'\
+ '725ebd9afa775903324dbd6028f4e78f'\
+ '784b0c51d7906fc4931149765da88682'\
+ '426b2cfe9e6aa4f27253eab400111432'\
+ 'e3a7bd9afa775903324dbd6028f4e78f'\
+ '784b0fa3a29ad05130d7fe5bf4d25965'\
+ '63cded1d874096aacc181069932a2e49'\
+ '519abd9afa775903324dbd6028f4e78f'\
+ '784b147730b42f11fe493fe902b6251e'\
+ '97cd2b6f34d36af59330f11d02a42f94'\
+ '0d07bd9afa775903324dbd6028f4e78f'\
+ '784b148fe18f715a9fcfe1a444ce0fff'\
+ '7f85869eb422330dc04b314c0f295d6d'\
+ 'a79ebd9afa775903324dbd6028f4e78f'\
+ '784b1b909115a8d473e51328a87823bd'\
+ '621ce655dfae54fa2bfa72fdc0298611'\
+ 'd6b8bd9afa775903324dbd6028f4e78f'\
+ '784b1d8b58c1fdb8da8b33ccee1e5f97'\
+ '3af734d90ef317e33f5db1573c2ba088'\
+ 'a80cbd9afa775903324dbd6028f4e78f'\
+ '784b1f179186efdf5ef2de018245ba0e'\
+ 'ae8134868601ba0d35ff3d9865c1537c'\
+ 'ed93bd9afa775903324dbd6028f4e78f'\
+ '784b270c84b29d86f16312b06aaae4eb'\
+ 'b8dff8de7d080d825b8839ff1766274e'\
+ 'ff47bd9afa775903324dbd6028f4e78f'\
+ '784b29cca4544ea330d61591c784695c'\
+ '149c6b040022ac7b5b89cbd72800d108'\
+ '40eabd9afa775903324dbd6028f4e78f'\
+ '784b2b2298eaa26b9dc4a4558ae92e7b'\
+ 'b0e4f85cf34bf848fdf636c0c11fbec4'\
+ '9897bd9afa775903324dbd6028f4e78f'\
+ '784b2dcf8e8d817023d1e8e1451a3d68'\
+ 'd6ec30d9bed94cbcb87f19ddc1cc0116'\
+ 'ac1abd9afa775903324dbd6028f4e78f'\
+ '784b311a2ac55b50c09b30b3cc93b994'\
+ 'a119153eeeac54ef892fc447bbbd9610'\
+ '1aa1bd9afa775903324dbd6028f4e78f'\
+ '784b32ad3296829bc46dcfac5eddcb9d'\
+ 'bf2c1eed5c11f83b2210cf9c6e60c798'\
+ 'd4a7bd9afa775903324dbd6028f4e78f'\
+ '784b340da32b58331c8e2b561baf300c'\
+ 'a9dfd6b91cd2270ee0e2a34958b1c625'\
+ '9e85bd9afa775903324dbd6028f4e78f'\
+ '784b362ed31d20b1e00392281231a96f'\
+ '0a0acfde02618953e695c9ef2eb0bac3'\
+ '7550bd9afa775903324dbd6028f4e78f'\
+ '784b367a31e5838831ad2c074647886a'\
+ '6cdff217e6b1ba910bff85dc7a87ae9b'\
+ '5e98bd9afa775903324dbd6028f4e78f'\
+ '784b3765d769c05bf98b427b3511903b'\
+ '2137e8a49b6f859d0af159ed6a86786a'\
+ 'a634bd9afa775903324dbd6028f4e78f'\
+ '784b386d695cdf2d4576e01bcaccf5e4'\
+ '9e78da51af9955c0b8fa7606373b0079'\
+ '94b3bd9afa775903324dbd6028f4e78f'\
+ '784b3a4f74beafae2b9383ad8215d233'\
+ 'a6cf3d057fb3c7e213e897beef4255fa'\
+ 'ee9dbd9afa775903324dbd6028f4e78f'\
+ '784b3ae76c45ca70e9180c1559981f42'\
+ '622dd251bca1fbe6b901c52ec11673b0'\
+ '3514bd9afa775903324dbd6028f4e78f'\
+ '784b3be8e7eb348d35c1928f19c76984'\
+ '6788991641d1f6cf09514ca10269934f'\
+ '7359bd9afa775903324dbd6028f4e78f'\
+ '784b3e3926f0b8a15ad5a14167bb647a'\
+ '843c3d4321e35dbc44dce8c837417f2d'\
+ '28b0bd9afa775903324dbd6028f4e78f'\
+ '784b400ac66d59b7b094a9e30b01a6bd'\
+ '013aff1d30570f83e7592f421dbe5ff4'\
+ 'ba8fbd9afa775903324dbd6028f4e78f'\
+ '784b4185821f6dab5ba8347b78a22b5f'\
+ '9a0a7570ca5c93a74d478a793d83bac4'\
+ '9805bd9afa775903324dbd6028f4e78f'\
+ '784b41d1eeb177c0324e17dd6557f384'\
+ 'e532de0cf51a019a446b01efb351bc25'\
+ '9d77bd9afa775903324dbd6028f4e78f'\
+ '784b45876b4dd861d45b3a9480077402'\
+ '7a5db45a48b2a729410908b6412f8a87'\
+ 'e95dbd9afa775903324dbd6028f4e78f'\
+ '784b4667bf250cd7c1a06b8474c613cd'\
+ 'b1df648a7f58736fbf57d05d6f755dab'\
+ '67f4bd9afa775903324dbd6028f4e78f'\
+ '784b47ff1b63b140b6fc04ed79131331'\
+ 'e651da5b2e2f170f5daef4153dc2fbc5'\
+ '32b1bd9afa775903324dbd6028f4e78f'\
+ '784b57e6913afacc5222bd76cdaf31f8'\
+ 'ed88895464255374ef097a82d7f59ad3'\
+ '9596bd9afa775903324dbd6028f4e78f'\
+ '784b5890fa227121c76d90ed9e63c87e'\
+ '3a6533eea0f6f0a1a23f1fc445139bc6'\
+ 'bcdfbd9afa775903324dbd6028f4e78f'\
+ '784b5d1e9acbbb4a7d024b6852df0259'\
+ '70e2ced66ff622ee019cd0ed7fd841cc'\
+ 'ad02bd9afa775903324dbd6028f4e78f'\
+ '784b61cec4a377bf5902c0feaee37034'\
+ 'bf97d5bc6e0615e23a1cdfbae6e3f5fb'\
+ '3cfdbd9afa775903324dbd6028f4e78f'\
+ '784b631f0857b41845362c90c6980b4b'\
+ '10c4b628e23dbe24b6e96c128ae3dcb0'\
+ 'd5acbd9afa775903324dbd6028f4e78f'\
+ '784b65b2e7cc18d903c331df1152df73'\
+ 'ca0dc932d29f17997481c56f3087b2dd'\
+ '3147bd9afa775903324dbd6028f4e78f'\
+ '784b66aa13a0edc219384d9c425d3927'\
+ 'e6ed4a5d1940c5e7cd4dac88f5770103'\
+ 'f2f1bd9afa775903324dbd6028f4e78f'\
+ '784b6873d2f61c29bd52e954eeff5977'\
+ 'aa8367439997811a62ff212c948133c6'\
+ '8d97bd9afa775903324dbd6028f4e78f'\
+ '784b6dbbead23e8c860cf8b47f74fbfc'\
+ 'a5204de3e28b881313bb1d1eccdc4747'\
+ '934ebd9afa775903324dbd6028f4e78f'\
+ '784b6dead13257dfc3ccc6a4b37016ba'\
+ '91755fe9e0ec1f415030942e5abc47f0'\
+ '7c88bd9afa775903324dbd6028f4e78f'\
+ '784b70a1450af2ad395569ad0afeb1d9'\
+ 'c125324ee90aec39c258880134d4892d'\
+ '51abbd9afa775903324dbd6028f4e78f'\
+ '784b72c26f827ceb92989798961bc6ae'\
+ '748d141e05d3ebcfb65d9041b266c920'\
+ 'be82bd9afa775903324dbd6028f4e78f'\
+ '784b781764102188a8b4b173d4a8f5ec'\
+ '94d828647156097f99357a581e624b37'\
+ '7509bd9afa775903324dbd6028f4e78f'\
+ '784b788383a4c733bb87d2bf51673dc7'\
+ '3e92df15ab7d51dc715627ae77686d8d'\
+ '23bcbd9afa775903324dbd6028f4e78f'\
+ '784b78b4edcaabc8d9093e20e217802c'\
+ 'aeb4f09e23a3394c4acc6e87e8f35395'\
+ '310fbd9afa775903324dbd6028f4e78f'\
+ '784b7f49ccb309323b1c7ab11c93c955'\
+ 'b8c744f0a2b75c311f495e1890607050'\
+ '0027bd9afa775903324dbd6028f4e78f'\
+ '784b82acba48d5236ccff7659afc1459'\
+ '4dee902bd6082ef1a30a0b9b508628cf'\
+ '34f4bd9afa775903324dbd6028f4e78f'\
+ '784b894d7839368f3298cc915ae8742e'\
+ 'f330d7a26699f459478cf22c2b6bb285'\
+ '0166bd9afa775903324dbd6028f4e78f'\
+ '784b8c0349d708571ae5aa21c1136348'\
+ '2332073297d868f29058916529efc520'\
+ 'ef70bd9afa775903324dbd6028f4e78f'\
+ '784b8d93d60c691959651476e5dc464b'\
+ 'e12a85fa5280b6f524d4a1c3fcc9d048'\
+ 'cfadbd9afa775903324dbd6028f4e78f'\
+ '784b9063f5fbc5e57ab6de6c94881460'\
+ '20e172b176d5ab57d4c89f0f600e17fe'\
+ '2de2bd9afa775903324dbd6028f4e78f'\
+ '784b91656aa4ef493b3824a0b7263248'\
+ 'e4e2d657a5c8488d880cb65b01730932'\
+ 'fb53bd9afa775903324dbd6028f4e78f'\
+ '784b91971c1497bf8e5bc68439acc48d'\
+ '63ebb8faabfd764dcbe82f3ba977cac8'\
+ 'cf6abd9afa775903324dbd6028f4e78f'\
+ '784b947078f97c6196968c3ae99c9a5d'\
+ '58667e86882cf6c8c9d58967a496bb7a'\
+ 'f43cbd9afa775903324dbd6028f4e78f'\
+ '784b96e4509450d380dac362ff8e2955'\
+ '89128a1f1ce55885d20d89c27ba2a9d0'\
+ '0909bd9afa775903324dbd6028f4e78f'\
+ '784b9783b5ee4492e9e891c655f1f480'\
+ '35959dad453c0e623af0fe7bf2c0a578'\
+ '85e3bd9afa775903324dbd6028f4e78f'\
+ '784b97a51a094444620df38cd8c6512c'\
+ 'ac909a75fd437ae1e4d2292980766123'\
+ '8127bd9afa775903324dbd6028f4e78f'\
+ '784b97a8c5ba11d61fefbb5d6a05da4e'\
+ '15ba472dc4c6cd4972fc1a035de32134'\
+ '2fe4bd9afa775903324dbd6028f4e78f'\
+ '784b992820e6ec8c41daae4bd8ab48f5'\
+ '8268e943a670d35ca5e2bdcd3e7c4c94'\
+ 'a072bd9afa775903324dbd6028f4e78f'\
+ '784b9954a1a99d55e8b189ab1bca414b'\
+ '91f6a017191f6c40a86b6f3ef368dd86'\
+ '0031bd9afa775903324dbd6028f4e78f'\
+ '784b9baf4f76d76bf5d6a897bfbd5f42'\
+ '9ba14d04e08b48c3ee8d76930a828fff'\
+ '3891bd9afa775903324dbd6028f4e78f'\
+ '784b9c259fcb301d5fc7397ed5759963'\
+ 'e0ef6b36e42057fd73046e6bd08b149f'\
+ '751cbd9afa775903324dbd6028f4e78f'\
+ '784b9dd2dcb72f5e741627f2e9e03ab1'\
+ '8503a3403cf6a904a479a4db05d97e22'\
+ '50a9bd9afa775903324dbd6028f4e78f'\
+ '784b9ed33f0fbc180bc032f8909ca2c4'\
+ 'ab3418edc33a45a50d2521a3b5876aa3'\
+ 'ea2cbd9afa775903324dbd6028f4e78f'\
+ '784ba4d978b7c4bda15435d508f8b959'\
+ '2ec2a5adfb12ea7bad146a35ecb53094'\
+ '642fbd9afa775903324dbd6028f4e78f'\
+ '784ba924d3cad6da42b7399b96a095a0'\
+ '6f18f6b1aba5b873b0d5f3a0ee2173b4'\
+ '8b6cbd9afa775903324dbd6028f4e78f'\
+ '784bad3be589c0474e97de5bb2bf3353'\
+ '4948b76bb80376dfdc58b1fed767b5a1'\
+ '5bfcbd9afa775903324dbd6028f4e78f'\
+ '784bb8d6b5e7857b45830e017c7be3d8'\
+ '56adeb97c7290eb0665a3d473a4beb51'\
+ 'dcf3bd9afa775903324dbd6028f4e78f'\
+ '784bb93f0699598f8b20fa0dacc12cfc'\
+ 'fc1f2568793f6e779e04795e6d7c2253'\
+ '0f75bd9afa775903324dbd6028f4e78f'\
+ '784bbb01da0333bb639c7e1c806db056'\
+ '1dc98a5316f22fef1090fb8d0be46dae'\
+ '499abd9afa775903324dbd6028f4e78f'\
+ '784bbc75f910ff320f5cb5999e66bbd4'\
+ '034f4ae537a42fdfef35161c5348e366'\
+ 'e216bd9afa775903324dbd6028f4e78f'\
+ '784bbdd01126e9d85710d3fe75af1cc1'\
+ '702a29f081b4f6fdf6a2b2135c0297a9'\
+ 'cec5bd9afa775903324dbd6028f4e78f'\
+ '784bbe435df7cd28aa2a7c8db4fc8173'\
+ '475b77e5abf392f76b7c76fa3f698cb7'\
+ '1a9abd9afa775903324dbd6028f4e78f'\
+ '784bbef7663be5ea4dbfd8686e24701e'\
+ '036f4c03fb7fcd67a6c566ed94ce09c4'\
+ '4470bd9afa775903324dbd6028f4e78f'\
+ '784bc2469759c1947e14f4b65f72a9f5'\
+ 'b3af8b6f6e727b68bb0d91385cbf4217'\
+ '6a8abd9afa775903324dbd6028f4e78f'\
+ '784bc3505bf3ec10a51dace417c76b8b'\
+ 'd10939a065d1f34e75b8a3065ee31cc6'\
+ '9b96bd9afa775903324dbd6028f4e78f'\
+ '784bc42d11c70ccf5e8cf3fb91fdf21d'\
+ '884021ad836ca68adf2cbb7995c10bf5'\
+ '88d4bd9afa775903324dbd6028f4e78f'\
+ '784bc69d64a5b839e41ba16742527e17'\
+ '056a18ce3c276fd26e34901a1bc7d0e3'\
+ '2219bd9afa775903324dbd6028f4e78f'\
+ '784bcb340011afeb0d74c4a588b36eba'\
+ 'a441961608e8d2fa80dca8c13872c850'\
+ '796bbd9afa775903324dbd6028f4e78f'\
+ '784bcc8eec6eb9212cbf897a5ace7e8a'\
+ 'beece1079f1a6def0a789591cb1547f1'\
+ 'f084bd9afa775903324dbd6028f4e78f'\
+ '784bcf13a243c1cd2e3c8ceb7e701003'\
+ '87cecbfb830525bbf9d0b70c79adf3e8'\
+ '4128bd9afa775903324dbd6028f4e78f'\
+ '784bd89a11d16c488dd4fbbc541d4b07'\
+ 'faf8670d660994488fe54b1fbff2704e'\
+ '4288bd9afa775903324dbd6028f4e78f'\
+ '784bd9668ab52785086786c134b5e4bd'\
+ 'dbf72452813b6973229ab92aa1a54d20'\
+ '1bf5bd9afa775903324dbd6028f4e78f'\
+ '784bda3560fd0c32b54c83d4f2ff8690'\
+ '03d2089369acf2c89608f8afa7436bfa'\
+ '4655bd9afa775903324dbd6028f4e78f'\
+ '784bdf02aab48387a9e1d4c65228089c'\
+ 'b6abe196c8f4b396c7e4bbc395de1369'\
+ '77f6bd9afa775903324dbd6028f4e78f'\
+ '784bdf91ac85a94fcd0cfb8155bd7cbe'\
+ 'faac14b8c5ee7397fe2cc85984459e2e'\
+ 'a14ebd9afa775903324dbd6028f4e78f'\
+ '784be051b788ecbaeda53046c70e6af6'\
+ '058f95222c046157b8c4c1b9c2cfc65f'\
+ '46e5bd9afa775903324dbd6028f4e78f'\
+ '784be36dfc719d2114c2e39aea88849e'\
+ '2845ab326f6f7fe74e0e539b7e54d81f'\
+ '3631bd9afa775903324dbd6028f4e78f'\
+ '784be39891f48bbcc593b8ed86ce82ce'\
+ '666fc1145b9fcbfd2b07bad0a89bf4c7'\
+ 'bfbfbd9afa775903324dbd6028f4e78f'\
+ '784be6856f137f79992dc94fa2f43297'\
+ 'ec32d2d9a76f7be66114c6a13efc3bcd'\
+ 'f5c8bd9afa775903324dbd6028f4e78f'\
+ '784beaff8c85c208ba4d5b6b8046f5d6'\
+ '081747d779bada7768e649d047ff9b1f'\
+ '660cbd9afa775903324dbd6028f4e78f'\
+ '784bee83a566496109a74f6ac6e410df'\
+ '00bb29a290e0021516ae3b8a23288e7e'\
+ '2e72bd9afa775903324dbd6028f4e78f'\
+ '784beed7e0eff2ed559e2a79ee361f99'\
+ '62af3b1e999131e30bb7fd07546fae0a'\
+ '7267bd9afa775903324dbd6028f4e78f'\
+ '784bf1b4f6513b0d544a688d13adc291'\
+ 'efa8c59f420ca5dcb23e0b5a06fa7e0d'\
+ '083dbd9afa775903324dbd6028f4e78f'\
+ '784bf2a16d35b554694187a70d40ca68'\
+ '2959f4f35c2ce0eab8fd64f7ac2ab9f5'\
+ 'c24abd9afa775903324dbd6028f4e78f'\
+ '784bf31fd461c5e99510403fc97c1da2'\
+ 'd8a9cbe270597d32badf8fd66b77495f'\
+ '8d94bd9afa775903324dbd6028f4e78f'\
+ '784bf48e6dd8718e953b60a24f2cbea6'\
+ '0a9521deae67db25425b7d3ace3c517d'\
+ 'd9b7bd9afa775903324dbd6028f4e78f'\
+ '784bc805603c4fa038776e42f263c604'\
+ 'b49d96840322e1922d5606a9b0bbb5bf'\
+ 'fe6fbd9afa775903324dbd6028f4e78f'\
+ '784b1f16078cce009df62edb9e7170e6'\
+ '6caae670bce71b8f92d38280c56aa372'\
+ '031dbd9afa775903324dbd6028f4e78f'\
+ '784b37a480374daf6202ce790c318a2b'\
+ 'b8aa3797311261160a8e30558b7dea78'\
+ 'c7a6bd9afa775903324dbd6028f4e78f'\
+ '784b408b8b3df5abb043521a49352502'\
+ '3175ab1261b1de21064d6bf247ce1421'\
+ '53b9bd9afa775903324dbd6028f4e78f'\
+ '784b540801dd345dc1c33ef431b35bf4'\
+ 'c0e68bd319b577b9abe1a9cff1cbc39f'\
+ '548fbd9afa775903324dbd6028f4e78f'\
+ '784b040b3bc339e9b6f9acd828b88f34'\
+ '82a5c3f64e67e5a714ba1da8a70453b3'\
+ '4af6bd9afa775903324dbd6028f4e78f'\
+ '784b1142a0cc7c9004dff64c5948484d'\
+ '6a7ec3514e176f5ca6bdeed7a093940b'\
+ '93ccbd9afa775903324dbd6028f4e78f'\
+ '784b288878f12e8b9c6ccbf601c73d5f'\
+ '4e985cac0ff3fcb0c24e4414912b3eb9'\
+ '1f15bd9afa775903324dbd6028f4e78f'\
+ '784b2ea4cb6a1f1eb1d3dce82d54fde2'\
+ '6ded243ba3e18de7c6d211902a594fe5'\
+ '6788bd9afa775903324dbd6028f4e78f'\
+ '784b40d6cae02973789080cf4c3a9ad1'\
+ '1b5a0a4d8bba4438ab96e276cc784454'\
+ 'dee7bd9afa775903324dbd6028f4e78f'\
+ '784b4f0214fce4fa8897d0c80a46d6da'\
+ 'b4124726d136fc2492efd01bfedfa388'\
+ '7a9cbd9afa775903324dbd6028f4e78f'\
+ '784b5c2afe34bd8a7aebbb439c251dfb'\
+ '6a424f00e535ac4df61ec19745b6f10e'\
+ '893abd9afa775903324dbd6028f4e78f'\
+ '784b99d7ada0d67e5233108dbd76702f'\
+ '4b168087cfc4ec65494d6ca8aba858fe'\
+ 'badabd9afa775903324dbd6028f4e78f'\
+ '784ba608a87f51bdf7532b4b80fa95ea'\
+ 'dfdf1bf8b0cbb58a7d3939c9f11c12e7'\
+ '1c85bd9afa775903324dbd6028f4e78f'\
+ '784bbdd4086c019f5d388453c6d93475'\
+ 'd39a576572baff75612c321b46a35a53'\
+ '29b1bd9afa775903324dbd6028f4e78f'\
+ '784bcb994b400590b66cbf55fc663555'\
+ 'caf0d4f1ce267464d0452c2361e05ee1'\
+ 'cd50bd9afa775903324dbd6028f4e78f'\
+ '784bd6ee8db782e36caffb4d9f820790'\
+ '0487de930aabcc1d196fa455fbfd6f37'\
+ '273dbd9afa775903324dbd6028f4e78f'\
+ '784bdda0121dcf167db1e2622d10f454'\
+ '701837ac6af304a03ec06b3027904988'\
+ 'c56bbd9afa775903324dbd6028f4e78f'\
+ '784be42572afac720f5d4a1c7aaaf802'\
+ 'f094daceb682f4e92783b2bb3fa00862'\
+ 'af7fbd9afa775903324dbd6028f4e78f'\
+ '784be6236dc1ee074c077c7a1c9b3965'\
+ '947430847be125f7aeb71d91a128133a'\
+ 'ea7fbd9afa775903324dbd6028f4e78f'\
+ '784bef87be89a413657de8721498552c'\
+ 'f9e0f3c1f71bc62dfa63b9f25bbc66e8'\
+ '6494bd9afa775903324dbd6028f4e78f'\
+ '784bf5e892dd6ec4c2defa4a495c0921'\
+ '9b621379b64da3d1b2e34adf4b5f1102'\
+ 'bd39bd9afa775903324dbd6028f4e78f'\
+ '784bd4241190cd5a369d8c344c660e24'\
+ 'f3027fb8e7064fab33770e93fa765ffb'\
+ '152ebd9afa775903324dbd6028f4e78f'\
+ '784b23142e14424fb3ff4efc75d00b63'\
+ '867727841aba5005149070ee2417df8a'\
+ 'b799bd9afa775903324dbd6028f4e78f'\
+ '784b91721aa76266b5bb2f8009f11885'\
+ '10a36e54afd56e967387ea7d0b114d78'\
+ '2089bd9afa775903324dbd6028f4e78f'\
+ '784bdc8aff7faa9d1a00a3e32eefbf89'\
+ '9b3059cbb313a48b82fa9c8d931fd58f'\
+ 'b69dbd9afa775903324dbd6028f4e78f'\
+ '784b9959ed4e05e548b59f219308a455'\
+ '63ea85bb224c1ad96dec0e96c0e71ffc'\
+ 'cd81bd9afa775903324dbd6028f4e78f'\
+ '784b47b31a1c7867644b2ee8093b2d5f'\
+ 'be21e21f77c1617a2c08812f57ace085'\
+ '0e9fbd9afa775903324dbd6028f4e78f'\
+ '784bfabc379df395e6f52472b44fa508'\
+ '2f9f0e0da480f05198c66814b7055b03'\
+ 'f446bd9afa775903324dbd6028f4e78f'\
+ '784be37ff3fc0eff20bfc1c060a4bf56'\
+ '885e1efd55a8e9ce3c5f4869444cacff'\
+ 'ad0bbd9afa775903324dbd6028f4e78f'\
+ '784b4cdae3920a512c9c052a8b4aba90'\
+ '96969b0a0197b614031e4c64a5d898cb'\
+ '09b9bd9afa775903324dbd6028f4e78f'\
+ '784b5b89f1aa2435a03d18d9b203d17f'\
+ 'b4fba4f8f5076cf1f9b8d6d9b8262222'\
+ '35c1bd9afa775903324dbd6028f4e78f'\
+ '784b007f4c95125713b112093e21663e'\
+ '2d23e3c1ae9ce4b5de0d58a297332336'\
+ 'a2d8bd9afa775903324dbd6028f4e78f'\
+ '784be060da09561ae00dcfb1769d6e8e'\
+ '846868a1e99a54b14aa5d0689f2840ce'\
+ 'c6dfbd9afa775903324dbd6028f4e78f'\
+ '784b48f4584de1c5ec650c25e6c62363'\
+ '5ce101bd82617fc400d4150f0aee2355'\
+ 'b4cabd9afa775903324dbd6028f4e78f'\
+ '784baf79b14064601bc0987d4747af1e'\
+ '914a228c05d622ceda03b7a4f67014fe'\
+ 'e767bd9afa775903324dbd6028f4e78f'\
+ '784bc3d65e174d47d3772cb431ea599b'\
+ 'ba76b8670bfaa51081895796432e2ef6'\
+ '461fbd9afa775903324dbd6028f4e78f'\
+ '784b1e918f170a796b4b0b1400bb9bda'\
+ 'e75be1cf86705c2d0fc8fb9dd0c5016b'\
+ '933bbd9afa775903324dbd6028f4e78f'\
+ '784b66d0803e2550d9e790829ae1b5f8'\
+ '1547cc9bfbe69b51817068ecb5dabb7a'\
+ '89fcbd9afa775903324dbd6028f4e78f'\
+ '784b284153e7d04a9f187e5c3dbfe17b'\
+ '2672ad2fbdd119f27bec789417b79198'\
+ '53ecbd9afa775903324dbd6028f4e78f'\
+ '784bedd2cb55726e10abedec9de8ca5d'\
+ 'ed289ad793ab3b6919d163c875fec120'\
+ '9cd5bd9afa775903324dbd6028f4e78f'\
+ '784b90aec5c4995674a849c1d1384463'\
+ 'f3b02b5aa625a5c320fc4fe7d9bb58a6'\
+ '2398'
+
def sha256_esl_builder(initList: []):
"""
@@ -948,3 +1812,17 @@ def test_EfiSignatureDatabase_Sort_and_Deduplication(self):
self.assertEqual(output_dupes_esd.GetBytes(), ExpectedDupesEsd.GetBytes())
self.assertEqual(output_canonical_esd.GetBytes(), ExpectedCanonicalEsd.GetBytes())
+
+ def test_EFiVariableAuthentication2_load_from_file(self):
+ # translate the DBXFILE to a ByteIo stream
+ DbxIo = io.BytesIO(bytes.fromhex(DBXFILE))
+
+ # Use EFiVariableAuthentication2 to decode the DBX file
+ EfiAuthVar = EFiVariableAuthentication2(decodefs=DbxIo)
+
+ # Write the decoded Dbx File back into a ByteIo stream
+ Output = io.BytesIO()
+ EfiAuthVar.Write(Output)
+
+ # Assert if the Decoded version does not match the original
+ self.assertEqual(Output.getvalue(), DbxIo.getvalue())
|
[Bug]: Loading EFiVariableAuthentication2 from Filestream results in exception
### Contact Details
[email protected]
### Describe the Bug
Downloading the latest [DBX from UEFI.org](https://uefi.org/sites/default/files/resources/DBXUpdate.bin) and attempting to load it through the `EFiVariableAuthentication2` class in edk2toollib\uefi\authenticated_variables_structure_support.py results in an exception:
```
File "site-packages\edk2toollib\uefi\authenticated_variables_structure_support.py", line 849, in __init__
    self.PopulateFromFileStream(decodefs)
File "site-packages\edk2toollib\uefi\authenticated_variables_structure_support.py", line 862, in PopulateFromFileStream
    self.EfiTime = EfiTime(decodefs=fs)
File "site-packages\edk2toollib\uefi\authenticated_variables_structure_support.py", line 766, in __init__
    self.PopulateFromFileStream(decodefs)
File "site-packages\edk2toollib\uefi\authenticated_variables_structure_support.py", line 802, in PopulateFromFileStream
    self.Time = datetime.datetime(Year, Month, Day, Hour, Minute, Second, NanoSecond / 1000)
TypeError: 'float' object cannot be interpreted as an integer
```
### Reproduction steps
1. Download the dbx
2. Run the following example python code:

```python
from edk2toollib.uefi.authenticated_variables_structure_support import EFiVariableAuthentication2

fi = open("dbx.bin", "rb")
Db = EFiVariableAuthentication2(decodefs=fi)
```
### Expected behavior
It looks like the data unpacked from the file stream is divided by 1000 before being passed to `datetime.datetime`. I don't think the division is necessary, and removing it allows the operation to complete correctly:

```python
def PopulateFromFileStream(self, fs):
    ...
    # current (fails, since "/" produces a float):
    self.Time = datetime.datetime(Year, Month, Day, Hour, Minute, Second, NanoSecond / 1000)
    # suggested:
    self.Time = datetime.datetime(Year, Month, Day, Hour, Minute, Second, NanoSecond)
```
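For reference, the merged fix in the patch above keeps the division but switches to floor division, which preserves the nanosecond-to-microsecond conversion while satisfying `datetime`'s integer requirement. A minimal sketch with made-up EFI_TIME field values:

```python
import datetime

# Hypothetical values, as if unpacked from an EFI_TIME structure.
Year, Month, Day = 2023, 1, 30
Hour, Minute, Second = 13, 17, 21
NanoSecond = 500_000_000

# True division ("/") always yields a float, but datetime's microsecond
# argument must be an int -- this is the TypeError from the traceback above.
try:
    datetime.datetime(Year, Month, Day, Hour, Minute, Second, NanoSecond / 1000)
except TypeError as e:
    print(e)  # 'float' object cannot be interpreted as an integer

# Floor division ("//") keeps the value an int while still converting
# nanoseconds to microseconds, so the constructor accepts it.
t = datetime.datetime(Year, Month, Day, Hour, Minute, Second, NanoSecond // 1000)
print(t)  # 2023-01-30 13:17:21.500000
```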
### What Python version are you using?
Python 3.10, Python 3.11
### Execution Environment
_No response_
### Pip packages
_No response_
### Additional context
_No response_
|
0.0
|
4bd867b57a0a959840e814ab006d172b16fca018
|
[
"edk2toollib/tests/test_authenticated_variables_structure_support.py::AuthVarSupportLibraryUnitTests::test_EFiVariableAuthentication2_load_from_file"
] |
[
"edk2toollib/tests/test_authenticated_variables_structure_support.py::AuthVarSupportLibraryUnitTests::test_EfiSignatureDatabase_Sort_and_Deduplication",
"edk2toollib/tests/test_authenticated_variables_structure_support.py::AuthVarSupportLibraryUnitTests::test_EfiSignatureList_Sort_and_Deduplication_sha256",
"edk2toollib/tests/test_authenticated_variables_structure_support.py::AuthVarSupportLibraryUnitTests::test_EfiSignatureList_Sort_and_Deduplication_x509",
"edk2toollib/tests/test_authenticated_variables_structure_support.py::AuthVarSupportLibraryUnitTests::test_deserializeEqualsSerialize",
"edk2toollib/tests/test_authenticated_variables_structure_support.py::AuthVarSupportLibraryUnitTests::test_print"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-27 23:13:49+00:00
|
bsd-2-clause
| 5,907 |
|
tianocore__edk2-pytool-library-365
|
diff --git a/edk2toollib/uefi/edk2/parsers/dec_parser.py b/edk2toollib/uefi/edk2/parsers/dec_parser.py
index 8c3c7aa..0a94464 100644
--- a/edk2toollib/uefi/edk2/parsers/dec_parser.py
+++ b/edk2toollib/uefi/edk2/parsers/dec_parser.py
@@ -7,6 +7,7 @@
##
"""Code to help parse DEC files."""
import os
+import re
from edk2toollib.uefi.edk2.parsers.base_parser import HashFileParser
from edk2toollib.uefi.edk2.parsers.guid_parser import GuidParser
@@ -123,7 +124,11 @@ def _parse(self, rawtext: str):
"""Parses the PcdDeclaration Entry for one PCD."""
sp = rawtext.partition(".")
self.token_space_name = sp[0].strip()
- op = sp[2].split("|")
+
+ # Regular expression pattern to match the symbol '|' that is not inside quotes
+ pattern = r'\|(?=(?:[^\'"]*[\'"][^\'"]*[\'"])*[^\'"]*$)'
+ op = re.split(pattern, sp[2])
+
# if it's 2 long, we need to check that it's a structured PCD
if (len(op) == 2 and op[0].count(".") > 0):
pass
|
tianocore/edk2-pytool-library
|
fd3aac552d5f995a5af6c9ecf5c7286bb9abbfe8
|
diff --git a/tests.unit/parsers/test_dec_parser.py b/tests.unit/parsers/test_dec_parser.py
index 528a256..a34bd1b 100644
--- a/tests.unit/parsers/test_dec_parser.py
+++ b/tests.unit/parsers/test_dec_parser.py
@@ -7,15 +7,18 @@
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
+import io
import unittest
import uuid
-import io
-from edk2toollib.uefi.edk2.parsers.dec_parser import LibraryClassDeclarationEntry
-from edk2toollib.uefi.edk2.parsers.dec_parser import GuidDeclarationEntry
-from edk2toollib.uefi.edk2.parsers.dec_parser import PpiDeclarationEntry
-from edk2toollib.uefi.edk2.parsers.dec_parser import ProtocolDeclarationEntry
-from edk2toollib.uefi.edk2.parsers.dec_parser import PcdDeclarationEntry
-from edk2toollib.uefi.edk2.parsers.dec_parser import DecParser
+
+from edk2toollib.uefi.edk2.parsers.dec_parser import (
+ DecParser,
+ GuidDeclarationEntry,
+ LibraryClassDeclarationEntry,
+ PcdDeclarationEntry,
+ PpiDeclarationEntry,
+ ProtocolDeclarationEntry,
+)
class TestGuidDeclarationEntry(unittest.TestCase):
@@ -107,6 +110,22 @@ def test_bad_structured_input(self):
with self.assertRaises(Exception):
PcdDeclarationEntry("testpkg", SAMPLE_DATA_DECL)
+ def test_string_containing_a_pipe(self):
+ SAMPLE_DATA_DECL = """gTestTokenSpaceGuid.PcdTestString | L"TestVal_1 | TestVal_2" | VOID* | 0x00010001"""
+ a = PcdDeclarationEntry("testpkg", SAMPLE_DATA_DECL)
+ self.assertEqual(a.token_space_name, "gTestTokenSpaceGuid")
+ self.assertEqual(a.name, "PcdTestString")
+ self.assertEqual(a.default_value, "L\"TestVal_1 | TestVal_2\"")
+ self.assertEqual(a.type, "VOID*")
+ self.assertEqual(a.id, "0x00010001")
+
+ SAMPLE_DATA_DECL = """gTestTokenSpaceGuid.PcdTestString | L'TestVal_1 | TestVal_2' | VOID* | 0x00010001"""
+ a = PcdDeclarationEntry("testpkg", SAMPLE_DATA_DECL)
+ self.assertEqual(a.token_space_name, "gTestTokenSpaceGuid")
+ self.assertEqual(a.name, "PcdTestString")
+ self.assertEqual(a.default_value, "L'TestVal_1 | TestVal_2'")
+ self.assertEqual(a.type, "VOID*")
+ self.assertEqual(a.id, "0x00010001")
class TestDecParser(unittest.TestCase):
|
[Bug]: \edk2toollib\uefi\edk2\parsers\dec_parser.py fails parsing PCDs when a string contains '|'
### Contact Details
_No response_
### Describe the Bug
File: \edk2toollib\uefi\edk2\parsers\dec_parser.py
Class: PcdDeclarationEntry()
Function: _parse(self, rawtext: str)
Code Snip:
``` python
def _parse(self, rawtext: str):
"""Parses the PcdDeclaration Entry for one PCD."""
sp = rawtext.partition(".")
self.token_space_name = sp[0].strip()
op = sp[2].split("|")
# if it's 2 long, we need to check that it's a structured PCD
if (len(op) == 2 and op[0].count(".") > 0):
pass
# otherwise it needs at least 4 parts
elif (len(op) < 4):
raise Exception(f"Too few parts: {op}")
# but also less than 5
elif (len(op) > 5):
raise Exception(f"Too many parts: {rawtext}")
elif (len(op) == 5 and op[4].strip() != '{'):
raise Exception(f"Too many parts: {rawtext}")
```
If the following line is used in a .DEC file to declare `PcdTestString`:

```
gTestTokenSpaceGuid.PcdTestString | L"TestVal_1 | TestVal_2" | VOID* | 0x00010001
```

This code will fail because it does not detect that the second '|' character is part of the assignment string.
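A quick sketch of the failure mode (the sample line is made up to match the declaration above): the naive split cuts the quoted default value in half, so `_parse` sees five fields whose last element is not `{` and raises "Too many parts".

```python
# Naive split on "|", as in the current _parse(): the quoted value is
# broken into two pieces, yielding five parts with op[4] != "{".
line = ' PcdTestString | L"TestVal_1 | TestVal_2" | VOID* | 0x00010001'
print([p.strip() for p in line.split("|")])
# ['PcdTestString', 'L"TestVal_1', 'TestVal_2"', 'VOID*', '0x00010001']
```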
### Reproduction steps
1. Assign a string (VOID*) PCD to a string value that contains a '|' character
2. Run the pip module dec parser to collect all information from that DEC file
### Expected behavior
The DEC file parser should recognize the quotes around the PCD's string value and ignore any delimiter characters ('|') inside them when parsing the assignment line.
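The merged fix (in the patch above) achieves this with a lookahead regex: split on '|' only when the remainder of the string contains an even number of quote characters, i.e. when this '|' sits outside any quoted span. A small self-contained sketch, with the same made-up sample line:

```python
import re

# Pattern from the patch: "|" qualifies as a delimiter only if an even
# number of quote characters (single or double) follows it.
pattern = r'\|(?=(?:[^\'"]*[\'"][^\'"]*[\'"])*[^\'"]*$)'

line = ' PcdTestString | L"TestVal_1 | TestVal_2" | VOID* | 0x00010001'
print([p.strip() for p in re.split(pattern, line)])
# ['PcdTestString', 'L"TestVal_1 | TestVal_2"', 'VOID*', '0x00010001']
```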
### What Python version are you using?
Python 3.10
### Execution Environment
Python 3.10.7
### Pip packages
Package Version
---------------------- ---------
antlr4-python3-runtime 4.7.1
astroid 2.13.2
asttokens 2.2.1
attrs 22.1.0
cffi 1.15.1
colorama 0.4.6
cryptography 3.3.1
dill 0.3.6
docutils 0.19
dtschema 2021.10
edk2-basetools 0.1.39
edk2-pytool-extensions 0.21.1
edk2-pytool-library 0.12.1
elementpath 3.0.2
future 0.18.2
gitdb 4.0.10
GitPython 3.1.29
isort 5.11.4
jedi 0.18.2
jsonschema 4.0.0
lazy-object-proxy 1.9.0
mccabe 0.7.0
mypy 0.991
mypy-extensions 0.4.3
parso 0.8.3
pefile 2022.5.30
pip 22.3.1
platformdirs 2.6.2
pycparser 2.21
pygit2 1.11.1
pylint 2.15.10
pyrsistent 0.19.1
pyserial 3.5
PyYAML 6.0
regex 2022.8.17
rfc3987 1.3.8
ruamel.yaml 0.17.17
semantic-version 2.10.0
Send2Trash 1.8.0
setuptools 63.2.0
six 1.16.0
smmap 5.0.0
thonny 4.0.1
tomli 2.0.1
tomlkit 0.11.6
typing_extensions 4.4.0
wheel 0.38.4
wrapt 1.14.1
xmlschema 2.0.1
### Additional context
The standard UEFI build process is OK with the '|' in a PCD string; it appears to be just this parser that has trouble.
|
0.0
|
fd3aac552d5f995a5af6c9ecf5c7286bb9abbfe8
|
[
"tests.unit/parsers/test_dec_parser.py::TestPcdDeclarationEntry::test_string_containing_a_pipe"
] |
[
"tests.unit/parsers/test_dec_parser.py::TestGuidDeclarationEntry::test_invalid_guid_format",
"tests.unit/parsers/test_dec_parser.py::TestGuidDeclarationEntry::test_valid_input_guid",
"tests.unit/parsers/test_dec_parser.py::TestGuidDeclarationEntry::test_valid_input_leading_zero_removed",
"tests.unit/parsers/test_dec_parser.py::TestPpiDeclarationEntry::test_valid_input_guid",
"tests.unit/parsers/test_dec_parser.py::TestProtocolDeclarationEntry::test_valid_input_guid",
"tests.unit/parsers/test_dec_parser.py::TestLibraryClassDeclarationEntry::test_valid_input",
"tests.unit/parsers/test_dec_parser.py::TestPcdDeclarationEntry::test_bad_structured_input",
"tests.unit/parsers/test_dec_parser.py::TestPcdDeclarationEntry::test_good_structured_input",
"tests.unit/parsers/test_dec_parser.py::TestPcdDeclarationEntry::test_invalid_input_no_tokenspace",
"tests.unit/parsers/test_dec_parser.py::TestPcdDeclarationEntry::test_invalid_input_too_many_fields",
"tests.unit/parsers/test_dec_parser.py::TestPcdDeclarationEntry::test_valid_input",
"tests.unit/parsers/test_dec_parser.py::TestDecParser::test_valid_input"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-13 21:43:43+00:00
|
bsd-2-clause
| 5,908 |
|
tianocore__edk2-pytool-library-372
|
diff --git a/edk2toollib/uefi/edk2/path_utilities.py b/edk2toollib/uefi/edk2/path_utilities.py
index b367133..b62f336 100644
--- a/edk2toollib/uefi/edk2/path_utilities.py
+++ b/edk2toollib/uefi/edk2/path_utilities.py
@@ -5,13 +5,17 @@
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
-"""Code to help convert Edk2, absolute, and relative file paths."""
+r"""A module for managing Edk2 file paths agnostic to OS path separators ("/" vs "\").
+
+This module converts all windows style paths to Posix file paths internally, but will return
+the OS specific path with the exception of any function that returns an Edk2 style path,
+which will always return Posix form.
+"""
import errno
-import fnmatch
import logging
import os
from pathlib import Path
-from typing import Iterable
+from typing import Iterable, Optional
class Edk2Path(object):
@@ -19,6 +23,12 @@ class Edk2Path(object):
Class that helps perform path operations within an EDK workspace.
+ Attributes:
+ WorkspacePath (str): Absolute path to the workspace root.
+ PackagePathList (List[str]): List of absolute paths to a package.
+
+ Attributes are initialized by the constructor and are read-only.
+
!!! warning
Edk2Path performs expensive packages path and package validation when
instantiated. If using the same Workspace root and packages path, it is
@@ -36,34 +46,29 @@ class Edk2Path(object):
"""
- def __init__(self, ws: os.PathLike, package_path_list: Iterable[os.PathLike],
+ def __init__(self, ws: str, package_path_list: Iterable[str],
error_on_invalid_pp: bool = True):
"""Constructor.
Args:
- ws (os.PathLike): absolute path or cwd relative path of the workspace.
- package_path_list (Iterable[os.PathLike]): list of packages path.
- Entries can be Absolute path, workspace relative path, or CWD relative.
- error_on_invalid_pp (bool): default value is True. If packages path
- value is invalid raise exception.
+ ws: absolute path or cwd relative path of the workspace.
+ package_path_list: list of packages path. Entries can be Absolute path, workspace relative path, or CWD
+ relative.
+ error_on_invalid_pp: default value is True. If packages path value is invalid raise exception.
Raises:
(NotADirectoryError): Invalid workspace or package path directory.
"""
- self.WorkspacePath = ws
self.logger = logging.getLogger("Edk2Path")
# Other code is dependent the following types, so keep it that way:
# - self.PackagePathList: List[str]
# - self.WorkspacePath: str
-
- self.PackagePathList = []
- self.WorkspacePath = ""
-
+ ws = ws.replace("\\", "/")
workspace_candidate_path = Path(ws)
if not workspace_candidate_path.is_absolute():
- workspace_candidate_path = Path(os.getcwd(), ws)
+ workspace_candidate_path = Path.cwd() / ws
if not workspace_candidate_path.is_dir():
raise NotADirectoryError(
@@ -71,19 +76,19 @@ def __init__(self, ws: os.PathLike, package_path_list: Iterable[os.PathLike],
os.strerror(errno.ENOENT),
workspace_candidate_path.resolve())
- self.WorkspacePath = str(workspace_candidate_path)
+ self._workspace_path = workspace_candidate_path
candidate_package_path_list = []
- for a in package_path_list:
- if os.path.isabs(a):
- candidate_package_path_list.append(Path(a))
+ for a in [Path(path.replace("\\", "/")) for path in package_path_list]:
+ if a.is_absolute():
+ candidate_package_path_list.append(a)
else:
- wsr = Path(self.WorkspacePath, a)
+ wsr = self._workspace_path / a
if wsr.is_dir():
candidate_package_path_list.append(wsr)
else:
# assume current working dir relative. Will catch invalid dir when checking whole list
- candidate_package_path_list.append(Path(os.getcwd(), a))
+ candidate_package_path_list.append(Path.cwd() / a)
invalid_pp = []
for a in candidate_package_path_list[:]:
@@ -94,7 +99,7 @@ def __init__(self, ws: os.PathLike, package_path_list: Iterable[os.PathLike],
candidate_package_path_list.remove(a)
invalid_pp.append(str(a.resolve()))
- self.PackagePathList = [str(p) for p in candidate_package_path_list]
+ self._package_path_list = candidate_package_path_list
if invalid_pp and error_on_invalid_pp:
raise NotADirectoryError(errno.ENOENT, os.strerror(errno.ENOENT), invalid_pp)
@@ -111,9 +116,9 @@ def __init__(self, ws: os.PathLike, package_path_list: Iterable[os.PathLike],
# 3. Raise an Exception if two packages are found to be nested.
#
package_path_packages = {}
- for package_path in candidate_package_path_list:
+ for package_path in self._package_path_list:
package_path_packages[package_path] = \
- [Path(p).parent for p in package_path.glob('**/*.dec')]
+ [p.parent for p in package_path.glob('**/*.dec')]
# Note: The ability to ignore this function raising an exception on
# nested packages is temporary. Do not plan on this variable
@@ -170,22 +175,30 @@ def __init__(self, ws: os.PathLike, package_path_list: Iterable[os.PathLike],
f"environment variable to \"true\" as a temporary workaround "
f"until you fix the packages so they are no longer nested.")
- def GetEdk2RelativePathFromAbsolutePath(self, abspath):
- """Given an absolute path return a edk2 path relative to workspace or packagespath.
+ @property
+ def WorkspacePath(self):
+ """Workspace Path as a string."""
+ return str(self._workspace_path)
- Note: absolute path must be in the OS specific path form
- Note: the relative path will be in POSIX-like path form
+ @property
+ def PackagePathList(self):
+ """List of package paths as strings."""
+ return [str(p) for p in self._package_path_list]
+
+ def GetEdk2RelativePathFromAbsolutePath(self, abspath: str):
+ """Given an absolute path return a edk2 path relative to workspace or packagespath.
Args:
- abspath (os.PathLike): absolute path to a file or directory. Path must contain OS specific separator.
+ abspath: absolute path to a file or directory. Supports both Windows and Posix style paths
Returns:
- (os.PathLike): POSIX-like relative path to workspace or packagespath
+ (str): POSIX-like relative path to workspace or packagespath
(None): abspath is none
(None): path is not valid
"""
if abspath is None:
return None
+ abspath = Path(abspath.replace("\\", "/"))
relpath = None
found = False
@@ -196,23 +209,23 @@ def GetEdk2RelativePathFromAbsolutePath(self, abspath):
# Sort the package paths from from longest to shortest. This handles the case where a package and a package
# path are in the same directory. See the following path_utilities_test for a detailed explanation of the
# scenario: test_get_relative_path_when_folder_is_next_to_package
- for packagepath in sorted((os.path.normcase(p) for p in self.PackagePathList), reverse=True):
+ for packagepath in sorted(self._package_path_list, reverse=True):
# If a match is found, use the original string to avoid change in case
- if os.path.normcase(abspath).startswith(packagepath):
+ if abspath.is_relative_to(packagepath):
self.logger.debug("Successfully converted AbsPath to Edk2Relative Path using PackagePath")
- relpath = abspath[len(packagepath):]
+ relpath = abspath.relative_to(packagepath)
found = True
break
# If a match was not found, check if absolute path is based on the workspace root.
- if not found and os.path.normcase(abspath).startswith(os.path.normcase(self.WorkspacePath)):
+ if not found and abspath.is_relative_to(self._workspace_path):
self.logger.debug("Successfully converted AbsPath to Edk2Relative Path using WorkspacePath")
- relpath = abspath[len(self.WorkspacePath):]
+ relpath = abspath.relative_to(self._workspace_path)
found = True
if found:
- relpath = relpath.replace(os.sep, "/").strip("/")
+ relpath = relpath.as_posix()
self.logger.debug(f'[{abspath}] -> [{relpath}]')
return relpath
@@ -221,29 +234,29 @@ def GetEdk2RelativePathFromAbsolutePath(self, abspath):
self.logger.error(f'AbsolutePath: {abspath}')
return None
- def GetAbsolutePathOnThisSystemFromEdk2RelativePath(self, relpath, log_errors=True):
+ def GetAbsolutePathOnThisSystemFromEdk2RelativePath(self, relpath: str, log_errors: Optional[bool] = True):
"""Given a edk2 relative path return an absolute path to the file in this workspace.
Args:
- relpath (os.PathLike): POSIX-like path
- log_errors (:obj:`bool`, optional): whether to log errors
+ relpath: Relative path to convert. Supports both Windows and Posix style paths.
+ log_errors: whether to log errors
Returns:
- (os.PathLike): absolute path in the OS specific form
+ (str): absolute path in the OS specific form
(None): invalid relpath
(None): Unable to get the absolute path
"""
if relpath is None:
return None
- relpath = relpath.replace("/", os.sep)
- abspath = os.path.join(self.WorkspacePath, relpath)
- if os.path.exists(abspath):
- return abspath
-
- for a in self.PackagePathList:
- abspath = os.path.join(a, relpath)
- if (os.path.exists(abspath)):
- return abspath
+ relpath = relpath.replace("\\", "/")
+ abspath = self._workspace_path / relpath
+ if abspath.exists():
+ return str(abspath)
+
+ for a in self._package_path_list:
+ abspath = a / relpath
+ if abspath.exists():
+ return str(abspath)
if log_errors:
self.logger.error("Failed to convert Edk2Relative Path to an Absolute Path on this system.")
self.logger.error("Relative Path: %s" % relpath)
@@ -255,51 +268,45 @@ def GetContainingPackage(self, InputPath: str) -> str:
This isn't perfect but at least identifies the directory consistently.
- Note: The inputPath must be in the OS specific path form.
-
Args:
- InputPath (str): absolute path to a file, directory, or module.
- supports both windows and linux like paths.
+ InputPath: absolute path to a file, directory, or module. Supports both windows and linux like paths.
Returns:
(str): name of the package that the module is in.
"""
self.logger.debug("GetContainingPackage: %s" % InputPath)
+ InputPath = Path(InputPath.replace("\\", "/"))
# Make a list that has the path case normalized for comparison.
# Note: This only does anything on Windows
- package_paths = [os.path.normcase(x) for x in self.PackagePathList]
- workspace_path = os.path.normcase(self.WorkspacePath)
# 1. Handle the case that InputPath is not in the workspace tree
path_root = None
- if workspace_path not in os.path.normcase(InputPath):
- for p in package_paths:
- if p in os.path.normcase(InputPath):
+ if not InputPath.is_relative_to(self._workspace_path):
+ for p in self._package_path_list:
+ if InputPath.is_relative_to(p):
path_root = p
break
if not path_root:
return None
+ else:
+ path_root = self._workspace_path
# 2. Determine if the path is under a package in the workspace
# Start the search within the first available directory. If provided InputPath is a directory, start there,
# else (if InputPath is a file) move to it's parent directory and start there.
- if os.path.isdir(InputPath):
- dirpath = str(InputPath)
+ if InputPath.is_dir():
+ dirpath = InputPath
else:
- dirpath = os.path.dirname(InputPath)
-
- if not path_root:
- path_root = workspace_path
+ dirpath = InputPath.parent
- while path_root != os.path.normcase(dirpath):
- if os.path.exists(dirpath):
- for f in os.listdir(dirpath):
- if fnmatch.fnmatch(f.lower(), '*.dec'):
- a = os.path.basename(dirpath)
- return a
+ while not path_root.samefile(dirpath):
+ if dirpath.exists():
+ for f in dirpath.iterdir():
+ if f.suffix.lower() == '.dec':
+ return dirpath.name
- dirpath = os.path.dirname(dirpath)
+ dirpath = dirpath.parent
return None
@@ -318,23 +325,21 @@ def GetContainingModules(self, input_path: str) -> list[str]:
will be returned in a list of file path strings.
Args:
- input_path (str): Absolute path to a file, directory, or module.
- Supports both Windows and Linux like paths.
+ input_path: Absolute path to a file, directory, or module.
+ Supports both Windows and Posix like paths.
Returns:
(list[str]): Absolute paths of .inf files that could be the
containing module.
"""
- input_path = Path(input_path)
+ input_path = Path(input_path.replace("\\", "/"))
if not input_path.is_absolute():
# Todo: Return a more specific exception type when
# https://github.com/tianocore/edk2-pytool-library/issues/184 is
# implemented.
raise Exception("Module path must be absolute.")
- package_paths = [Path(os.path.normcase(x)) for x in self.PackagePathList]
- workspace_path = Path(os.path.normcase(self.WorkspacePath))
- all_root_paths = package_paths + [workspace_path]
+ all_root_paths = self._package_path_list + [self._workspace_path]
# For each root path, find the maximum allowed root in its hierarchy.
maximum_root_paths = all_root_paths
@@ -357,7 +362,7 @@ def GetContainingModules(self, input_path: str) -> list[str]:
return []
modules = []
- if input_path.suffix == '.inf':
+ if input_path.suffix.lower() == '.inf':
# Return the file path given since it is a module .inf file
modules = [str(input_path)]
|
tianocore/edk2-pytool-library
|
f521d59041afee6a8a82206b3871960409aaa612
|
diff --git a/tests.unit/test_path_utilities.py b/tests.unit/test_path_utilities.py
index 0839c10..da7cf6c 100644
--- a/tests.unit/test_path_utilities.py
+++ b/tests.unit/test_path_utilities.py
@@ -174,13 +174,13 @@ def test_invalid_pp(self):
(ws / "good_path").mkdir()
with self.assertRaises(NotADirectoryError) as context:
- Edk2Path(ws, ["bad_pp_path", "bad_pp_path2", "good_path"], error_on_invalid_pp=True)
+ Edk2Path(str(ws), ["bad_pp_path", "bad_pp_path2", "good_path"], error_on_invalid_pp=True)
self.assertTrue('bad_pp_path' in str(context.exception))
self.assertTrue('bad_pp_path2' in str(context.exception))
self.assertTrue('good_path' not in str(context.exception))
# Make sure we don't throw an exception unless we mean to
- Edk2Path(ws, ["bad_pp_path", "bad_pp_path2", "good_path"], error_on_invalid_pp=False)
+ Edk2Path(str(ws), ["bad_pp_path", "bad_pp_path2", "good_path"], error_on_invalid_pp=False)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_basic_init_ws_abs_different_case(self):
@@ -1117,6 +1117,39 @@ def test_get_relative_path_when_package_is_not_directly_inside_packages_path(sel
self.assertEqual(pathobj.GetEdk2RelativePathFromAbsolutePath(p),
f"{folder_extra_rel}/{ws_p_name}/{ws_p_name}.dec")
+ def test_get_edk2_relative_path_with_windows_path_on_linux(self):
+ '''Test basic usage of GetEdk2RelativePathFromAbsolutePath when the
+ provided path is a Windows path, but the code is running on linux.
+
+ File layout:
+
+ root/ <-- current working directory (self.tmp)
+ folder_ws/ <-- workspace root
+ folder_pp/ <-- packages path
+ folder_extra/
+ PPTestPkg/ <-- A edk2 package
+ PPTestPkg.DEC
+ '''
+ ws_rel = "folder_ws"
+ ws_abs = os.path.join(self.tmp, ws_rel)
+ os.mkdir(ws_abs)
+
+ folder_pp_rel = "folder_pp"
+ folder_pp_abs = os.path.join(ws_abs, folder_pp_rel)
+ os.mkdir(folder_pp_abs)
+
+ folder_extra_rel = "folder_extra"
+ folder_extra_abs = os.path.join(folder_pp_abs, folder_extra_rel)
+ os.mkdir(folder_extra_abs)
+
+ ws_p_name = "PPTestPkg"
+ ws_pkg_abs = self._make_edk2_package_helper(folder_extra_abs, ws_p_name)
+ pathobj = Edk2Path(ws_abs, [folder_pp_abs])
+
+ p = f"{ws_pkg_abs}\\module2\\X64\\TestFile.c"
+ self.assertEqual(pathobj.GetEdk2RelativePathFromAbsolutePath(p),
+ f"{folder_extra_rel}/PPTestPkg/module2/X64/TestFile.c")
+
def test_get_absolute_path_on_this_system_from_edk2_relative_path(self):
'''Test basic usage of GetAbsolutePathOnThisSystemFromEdk2RelativePath with packages path nested
inside the workspace.
|
[Bug]: GetEdk2RelativePathFromAbsolutePath() Can Silently Return Non-POSIX Path
### Contact Details
_No response_
### Describe the Bug
GetEdk2RelativePathFromAbsolutePath() is currently defined as follows:
```python
"""Given an absolute path return a edk2 path relative to workspace or packagespath.
Note: absolute path must be in the OS specific path form
Note: the relative path will be in POSIX-like path form
Args:
abspath (os.PathLike): absolute path to a file or directory. Path must contain OS specific separator.
Returns:
(os.PathLike): POSIX-like relative path to workspace or packagespath
(None): abspath is none
(None): path is not valid
"""
```
It requires a path specific to the current OS to be given as input. However, it does not raise an exception or otherwise fail in a well-defined way if the path given does not conform to the current OS; it likely returns a non-POSIX path.
So this is not a bug in the function when the user passes the expected path format, but a bug in how the function validates its input values.
### Reproduction steps
The following code demonstrates the problem if run on Linux since it passes a Windows style path to the function.
```
p = f"{ws_pkg_abs}\\module_2\\X64\\TestFile.inf"
expected_rel_from_abs_path = PurePath(os.path.join(ws_p_name, "module_2", "X64", "TestFile.inf")).as_posix()
actual_rel_from_abs_path = pathobj.GetEdk2RelativePathFromAbsolutePath(p)
> self.assertEqual(expected_rel_from_abs_path, actual_rel_from_abs_path)
E AssertionError: 'WSTestPkg/module_2/X64/TestFile.inf' != 'WSTestPkg\\module_2\\X64\\TestFile.inf'
E - WSTestPkg/module_2/X64/TestFile.inf
E ? ^ ^ ^
E + WSTestPkg\module_2\X64\TestFile.inf
```
### Expected behavior
The function should ensure that it handles input in a predictable and well-defined manner.
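For illustration, a minimal sketch of the normalization the fix above applies (rewriting backslashes before building the `Path`), using a hypothetical helper rather than the library's API:
```python
from pathlib import Path, PurePath

def to_relative_posix(abspath: str, root: str) -> str:
    # Hypothetical helper: normalize Windows-style separators so a
    # backslash path supplied on Linux still resolves against the root.
    normalized = Path(abspath.replace("\\", "/"))
    return PurePath(normalized.relative_to(Path(root))).as_posix()

print(to_relative_posix("/ws/Pkg\\module2\\X64\\TestFile.c", "/ws"))
# -> Pkg/module2/X64/TestFile.c
```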
### Execution Environment
_No response_
### Pip packages
_No response_
### Additional context
_No response_
|
0.0
|
f521d59041afee6a8a82206b3871960409aaa612
|
[
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_edk2_relative_path_with_windows_path_on_linux"
] |
[
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_basic_init_ws_abs",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_basic_init_ws_cwd",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_absolute_path_on_this_system_from_edk2_relative_path",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_absolute_path_then_relative_path_when_path_contains_repeated_packagepath_name",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_containing_module",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_containing_module_with_infs_in_other_temp_dirs",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_containing_modules_path_format",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_containing_modules_with_relative_path",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_containing_package_inside_workspace",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_containing_package_outside_workspace",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_edk2_relative_path_from_absolute_path",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_edk2_relative_path_from_absolute_path_posix",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_folder_is_next_to_package",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_package_is_not_directly_inside_packages_path",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_package_path_inside_package",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_packages_path_list_contains_substrings",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_when_path_does_not_exist",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_get_relative_path_with_nested_packages",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_invalid_pp",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_nonexistant_abs",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_nonexistant_ws",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_pp_inside_workspace",
"tests.unit/test_path_utilities.py::PathUtilitiesTest::test_pp_outside_workspace"
] |
{
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-20 21:26:32+00:00
|
bsd-2-clause
| 5,909 |
|
tianocore__edk2-pytool-library-379
|
diff --git a/edk2toollib/log/ansi_handler.py b/edk2toollib/log/ansi_handler.py
index 76f88fc..2076355 100644
--- a/edk2toollib/log/ansi_handler.py
+++ b/edk2toollib/log/ansi_handler.py
@@ -212,7 +212,7 @@ def format(self, record):
# otherwise color the wholes message
else:
levelname_color = get_ansi_string(ColoredFormatter.COLORS[levelname]) + levelname
- record.msg += get_ansi_string()
+ record.msg = str(org_message) + get_ansi_string()
record.levelname = levelname_color
if self.use_azure and levelname in ColoredFormatter.AZURE_COLORS:
|
tianocore/edk2-pytool-library
|
85b899a8a29c839fca15f4b8a008e4654cc71697
|
diff --git a/tests.unit/test_ansi_handler.py b/tests.unit/test_ansi_handler.py
index 346a38a..20f4f0d 100644
--- a/tests.unit/test_ansi_handler.py
+++ b/tests.unit/test_ansi_handler.py
@@ -5,10 +5,10 @@
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
-import unittest
import logging
-from edk2toollib.log.ansi_handler import ColoredFormatter
-from edk2toollib.log.ansi_handler import ColoredStreamHandler
+import unittest
+
+from edk2toollib.log.ansi_handler import ColoredFormatter, ColoredStreamHandler
try:
from StringIO import StringIO
@@ -26,6 +26,15 @@ class AnsiHandlerTest(unittest.TestCase):
record2 = logging.makeLogRecord({"name": "", "level": logging.INFO, "levelno": logging.INFO,
"levelname": "INFO", "path": "test_path", "lineno": 0,
"msg": "Test message"})
+ record3 = logging.makeLogRecord({"name": "", "level": logging.ERROR, "levelno": logging.ERROR,
+ "levelname": "ERROR", "path": "test_path", "lineno": 0,
+ "msg": ['Logging', 'A', 'List']})
+ record4 = logging.makeLogRecord({"name": "", "level": logging.ERROR, "levelno": logging.ERROR,
+ "levelname": "ERROR", "path": "test_path", "lineno": 0,
+ "msg": ('Logging', 'A', 'Tuple')})
+ record5 = logging.makeLogRecord({"name": "", "level": logging.ERROR, "levelno": logging.ERROR,
+ "levelname": "ERROR", "path": "test_path", "lineno": 0,
+ "msg": "Testing This Works: %s", "args": ("Test",)})
def test_colored_formatter_init(self):
formatter = ColoredFormatter("%(levelname)s - %(message)s")
@@ -82,3 +91,23 @@ def test_color_handler_not_strip_ansi(self):
if CSI in line:
found_csi = True
self.assertTrue(found_csi, "We are supposed to to have found an ANSI control character %s" % lines)
+
+ def test_ansi_handler_with_list(self):
+ """Tests that the ANSI handler can handle Iterables in the message."""
+ stream = StringIO()
+ formatter = ColoredFormatter("%(levelname)s - %(message)s")
+ handler = ColoredStreamHandler(stream, strip=False, convert=False)
+ handler.setFormatter(formatter)
+ handler.setLevel(logging.INFO)
+
+ handler.emit(AnsiHandlerTest.record3)
+ handler.emit(AnsiHandlerTest.record4)
+ handler.emit(AnsiHandlerTest.record5)
+ handler.flush()
+
+ stream.seek(0)
+ lines = stream.readlines()
+ CSI = '\033[31m' # Red
+ CSI2 = '\033[39m' # Reset
+ for line in lines:
+ assert CSI in line and CSI2 in line
|
Pytool injecting extra list elements during logging
When the logging level is higher than warning, the ANSI handler from pytool injects an extra element into the message: https://github.com/tianocore/edk2-pytool-library/blob/0eac598e871e8f74068ac00089bf39f09d1264eb/edk2toollib/log/ansi_handler.py#L201
This is a problem when the incoming message is a list, since the input list is mutated in place by this statement, and the next consumer of the same list sees a list with ANSI information injected into it.
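A minimal, runnable sketch of the aliasing hazard and of the fix's rebuild-as-string approach (the ANSI escape here is just an example sequence):
```python
import logging

ESC_RESET = "\033[39m"  # example ANSI reset sequence

shared = ["Logging", "A", "List"]
record = logging.makeLogRecord({"msg": shared})

# Before the fix: += on a list extends it character by character, so
# the caller's list silently gains stray ANSI characters.
record.msg += ESC_RESET
print(shared)  # ['Logging', 'A', 'List', '\x1b', '[', '3', '9', 'm']

# The fix rebuilds the message as a new string instead, leaving the
# original object untouched:
clean = ["Logging", "A", "List"]
record2 = logging.makeLogRecord({"msg": clean})
record2.msg = str(record2.msg) + ESC_RESET
print(clean)  # ['Logging', 'A', 'List']
```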
|
0.0
|
85b899a8a29c839fca15f4b8a008e4654cc71697
|
[
"tests.unit/test_ansi_handler.py::AnsiHandlerTest::test_ansi_handler_with_list"
] |
[
"tests.unit/test_ansi_handler.py::AnsiHandlerTest::test_color_handler_not_strip_ansi",
"tests.unit/test_ansi_handler.py::AnsiHandlerTest::test_color_handler_to_strip_ansi",
"tests.unit/test_ansi_handler.py::AnsiHandlerTest::test_colored_formatter_init",
"tests.unit/test_ansi_handler.py::AnsiHandlerTest::test_colored_formatter_to_output_ansi"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-08-03 16:46:07+00:00
|
bsd-2-clause
| 5,910 |
|
tianocore__edk2-pytool-library-84
|
diff --git a/edk2toollib/uefi/edk2/parsers/base_parser.py b/edk2toollib/uefi/edk2/parsers/base_parser.py
index 99b25e2..f22b6c5 100644
--- a/edk2toollib/uefi/edk2/parsers/base_parser.py
+++ b/edk2toollib/uefi/edk2/parsers/base_parser.py
@@ -133,7 +133,16 @@ def ComputeResult(self, value, cond, value2):
"""
ivalue = value
ivalue2 = value2
+ if isinstance(value, str):
+ ivalue = value.strip("\"")
+ if isinstance(value2, str):
+ ivalue2 = value2.strip("\"")
+
# convert it to interpretted value
+ if (cond.upper() == "IN"):
+ # strip quotes
+ self.Logger.debug(f"{ivalue} in {ivalue2}")
+ return ivalue in ivalue2
try:
ivalue = self.ConvertToInt(ivalue)
@@ -147,6 +156,12 @@ def ComputeResult(self, value, cond, value2):
except ValueError:
pass
+ # First check our boolean operators
+ if (cond.upper() == "OR"):
+ return ivalue or ivalue2
+ if (cond.upper() == "AND"):
+ return ivalue and ivalue2
+
# check our truthyness
if(cond == "=="):
# equal
@@ -156,17 +171,13 @@ def ComputeResult(self, value, cond, value2):
# not equal
return (ivalue != ivalue2) and (value != value2)
- elif (cond.lower() == "in"):
- # contains
- return value in value2
-
# check to make sure we only have digits from here on out
- if not str.isdigit(value):
+ if not isinstance(value, int) and not str.isdigit(value):
self.Logger.error(f"{self.__class__}: Unknown value: {value} {ivalue.__class__}")
self.Logger.debug(f"{self.__class__}: Conditional: {value} {cond}{value2}")
raise ValueError("Unknown value")
- if not str.isdigit(value2):
+ if not isinstance(value2, int) and not str.isdigit(value2):
self.Logger.error(f"{self.__class__}: Unknown value: {value2} {ivalue2}")
self.Logger.debug(f"{self.__class__}: Conditional: {value} {cond} {value2}")
raise ValueError("Unknown value")
@@ -200,6 +211,8 @@ def ConvertToInt(self, value):
Returns:
"""
+ if isinstance(value, int):
+ return value
if isinstance(value, str) and value.upper() == "TRUE":
return 1
elif isinstance(value, str) and value.upper() == "FALSE":
@@ -320,17 +333,7 @@ def ProcessConditional(self, text):
else:
tokens = text.split()
if(tokens[0].lower() == "!if"):
- # need to add support for OR/AND
- if (len(tokens) == 2):
- value = self.ConvertToInt(tokens[1].strip())
- self.PushConditional(value == 1) # if the value is true
- # we can have tokens in 4, 8, 12 etc
- elif len(tokens) >= 4 and len(tokens) % 4 == 0:
- con = self.ComputeResult(tokens[1].strip(), tokens[2].strip(), tokens[3].strip())
- self.PushConditional(con)
- else:
- self.Logger.error("!if conditionals need to be formatted correctly (spaces between each token)")
- raise RuntimeError("Invalid conditional", text)
+ self.PushConditional(self.EvaluateConditional(text))
return True
elif(tokens[0].lower() == "!ifdef"):
@@ -365,9 +368,135 @@ def ProcessConditional(self, text):
return False
+ def EvaluateConditional(self, text):
+ ''' Uses a pushdown resolver '''
+ text = str(text).strip()
+ if not text.lower().startswith("!if "):
+ raise RuntimeError(f"Invalid conditional cannot be validated: {text}")
+ text = text[3:].strip()
+ self.Logger.debug(f"STAGE 1: {text}")
+ text = self.ReplaceVariables(text)
+ self.Logger.debug(f"STAGE 2: {text}")
+
+ # TOKENIZER
+ # first we create tokens
+ TEXT_MODE = 0
+ QUOTE_MODE = 1
+ MACRO_MODE = 2
+ token = ""
+ mode = 0
+ tokens = []
+ for character in text:
+
+ if character == "\"" and len(token) == 0:
+ mode = QUOTE_MODE
+ elif character == "\"" and mode == QUOTE_MODE:
+ if len(token) > 0:
+ tokens.append(f"\"{token}\"")
+ token = ""
+ mode = TEXT_MODE
+ elif character == "$" and len(token) == 0:
+ token += character
+ mode = MACRO_MODE
+ elif character == ')' and mode == MACRO_MODE:
+ token += character
+ tokens.append(token)
+ token = ""
+ mode = TEXT_MODE
+ elif mode == TEXT_MODE and (character == "(" or character == ")"):
+ if len(token) > 0:
+ tokens.append(token)
+ token = ""
+ tokens.append(character)
+ elif character == " " and (mode == TEXT_MODE or mode == MACRO_MODE):
+ if len(token) > 0:
+ tokens.append(token)
+ token = ""
+ mode = TEXT_MODE
+ else:
+ token += character
+ # make sure to add in the last token just in case
+ if len(token) > 0:
+ tokens.append(token)
+
+ self.Logger.debug(f"STAGE 3: {' '.join(tokens)}")
+
+ operators = ["OR", "AND", "IN", "==", "!=", ">", "<", "<=", ">="]
+
+ # then we do the lexer and convert operands as necessary
+ for index in range(len(tokens)):
+ token = tokens[index]
+ token_upper = token.upper()
+ if token_upper in operators:
+ token = token_upper
+ elif token_upper == "||":
+ token = "OR"
+ elif token_upper == "&&":
+ token = "AND"
+ elif token_upper == "EQ":
+ token = "=="
+ elif token_upper == "NE":
+ token = "!="
+ tokens[index] = token
+ self.Logger.debug(f"STAGE 4: {tokens}")
+
+ # now we convert in fix into post fix?
+ stack = ["("]
+ tokens.append(")") # add an extra parathesis
+ expression = []
+ for token in tokens:
+ if token == "(":
+ stack.append(token)
+ elif token == ")":
+ while len(stack) > 0 and stack[-1] != '(':
+ expression.append(stack.pop())
+ elif token in operators:
+ while len(stack) > 0 and stack[-1] != '(':
+ self.Logger.debug(stack[-1])
+ expression.append(stack.pop())
+ stack.append(token)
+ else:
+ expression.append(token)
+ while len(stack) > 0:
+ val = stack.pop()
+ if val != '(':
+ expression.append(val)
+
+ self.Logger.debug(f"STAGE 5: {expression}")
+
+ # Now we evaluate the post fix expression
+ if len(expression) == 0:
+ raise RuntimeError(f"Malformed !if conditional expression {text} {expression}")
+ while len(expression) != 1:
+ first_operand_index = -1
+ for index, item in enumerate(expression):
+ if item in operators:
+ first_operand_index = index
+ break
+ if first_operand_index == -1:
+ raise RuntimeError(f"We didn't find an operator to execute in {expression}: {text}")
+ operand = expression[first_operand_index]
+ if first_operand_index < 2:
+ raise RuntimeError(f"We have a stray operand {operand}")
+ operator1 = expression[first_operand_index - 2]
+ operator2 = expression[first_operand_index - 1]
+
+ result = self.ComputeResult(operator1, operand, operator2)
+ self.Logger.debug(f"{operator1} {operand} {operator2} = {result} @ {first_operand_index}")
+ new_expression = expression[:first_operand_index - 2] if first_operand_index > 2 else []
+ self.Logger.debug(new_expression)
+ new_expression += [result, ] + expression[first_operand_index + 1:]
+ expression = new_expression
+
+ final = self.ConvertToInt(expression[0])
+ self.Logger.debug(f" FINAL {expression} {final}")
+
+ return bool(final)
+
#
# returns true or false depending on what state of conditional you are currently in
#
+
def InActiveCode(self):
""" """
ret = True
|
tianocore/edk2-pytool-library
|
898d19216a5218dc3e907203befc7dc7818b8595
|
diff --git a/edk2toollib/uefi/edk2/parsers/base_parser_test.py b/edk2toollib/uefi/edk2/parsers/base_parser_test.py
index a043cec..d51807a 100644
--- a/edk2toollib/uefi/edk2/parsers/base_parser_test.py
+++ b/edk2toollib/uefi/edk2/parsers/base_parser_test.py
@@ -49,6 +49,9 @@ def test_replace_macro_local_var_priority(self):
line = "Hello $(name)!"
self.assertEqual(parser.ReplaceVariables(line), "Hello fred!")
+
+class TestBaseParserConditionals(unittest.TestCase):
+
def test_replace_macro_without_resolution(self):
parser = BaseParser("")
parser.SetInputVars({
@@ -385,6 +388,52 @@ def test_process_conditional_reset(self):
self.assertTrue(parser.InActiveCode())
self.assertEqual(len(parser.ConditionalStack), 0)
+ def test_process_in_conditional(self):
+ parser = BaseParser("")
+ parser.SetInputVars({"TOOL_CHAIN_TAG": "GCC5_TEST"})
+ self.assertTrue(parser.ProcessConditional(
+ '!if ("GCC49" in $(TOOL_CHAIN_TAG)) OR ("GCC5" in $(TOOL_CHAIN_TAG))'))
+ self.assertTrue(parser.InActiveCode())
+ parser.ResetParserState()
+ parser.SetInputVars({"TOOL_CHAIN_TAG": "TESTGCC49"})
+ self.assertTrue(parser.ProcessConditional(
+ '!if ("GCC49" in $(TOOL_CHAIN_TAG)) OR ("GCC5" in $(TOOL_CHAIN_TAG))'))
+ self.assertTrue(parser.InActiveCode())
+ parser.ResetParserState()
+ # Don't give it a tool chain tag that isn't in the things we're searching for
+ parser.SetInputVars({"TOOL_CHAIN_TAG": "NOTFOUND"})
+ self.assertTrue(parser.ProcessConditional(
+ '!if ("GCC49" in $(TOOL_CHAIN_TAG)) OR ("GCC5" in $(TOOL_CHAIN_TAG))'))
+ self.assertFalse(parser.InActiveCode())
+
+ def test_process_or_operation_conditional(self):
+ parser = BaseParser("")
+ self.assertTrue(parser.EvaluateConditional('!IF TRUE OR FALSE'))
+ self.assertTrue(parser.EvaluateConditional('!if FALSE OR TRUE'))
+ self.assertTrue(parser.EvaluateConditional('!if FALSE || TRUE'))
+ self.assertTrue(parser.EvaluateConditional('!if TRUE OR TRUE'))
+ self.assertFalse(parser.EvaluateConditional('!if FALSE OR FALSE'))
+ self.assertFalse(parser.EvaluateConditional('!if FALSE || FALSE'))
+
+ def test_process_and_operation_conditional(self):
+ parser = BaseParser("")
+ self.assertFalse(parser.EvaluateConditional('!if TRUE AND FALSE'))
+ self.assertFalse(parser.EvaluateConditional('!if FALSE AND TRUE'))
+ self.assertTrue(parser.EvaluateConditional('!if TRUE AND TRUE'))
+ self.assertTrue(parser.EvaluateConditional('!if TRUE && TRUE'))
+ self.assertFalse(parser.EvaluateConditional('!if FALSE AND FALSE'))
+ self.assertFalse(parser.EvaluateConditional('!if FALSE && FALSE'))
+
+ def test_process_invalid_conditional(self):
+ parser = BaseParser("")
+ with self.assertRaises(RuntimeError):
+ parser.EvaluateConditional('!if TRUE AND FALSE AND')
+ with self.assertRaises(RuntimeError):
+ parser.EvaluateConditional('TRUE AND FALSE AND')
+
+
+class TestBaseParserGuids(unittest.TestCase):
+
def test_is_guid(self):
guid1 = "= { 0xD3B36F2C, 0xD551, 0x11D4, {0x9A, 0x46, 0x0, 0x90, 0x27, 0x3F, 0xC1,0xD }}"
parser = BaseParser("")
@@ -421,6 +470,9 @@ def test_parse_guid(self):
guid4_result = parser.ParseGuid(guid4)
self.assertEqual(guid4_result, guid4_answer)
+
+class TestBaseParserVariables(unittest.TestCase):
+
def test_replace_input_variables(self):
parser = BaseParser("")
variables = {
@@ -477,6 +529,9 @@ def test_replace_local_variables(self):
val = "var " + str(variables[variable_key])
self.assertEqual(result, val)
+
+class TestBaseParserPathAndFile(unittest.TestCase):
+
# because of how this works we use WriteLines, SetAbsPath, and SetPackagePath
def test_find_path(self):
# we're using write lines to make sure everything wo
|
10.7 breaks certain "in" conditionals
This conditional is broken with the latest release:
!if ("GCC49" in $(TOOL_CHAIN_TAG)) OR ("GCC5" in $(TOOL_CHAIN_TAG))
...
!endif
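For context, a minimal sketch of the shunting-yard style infix-to-postfix pass the fix uses to support OR/AND alongside `in`; a simplified illustration assuming whitespace-separated tokens, not the library's full tokenizer:
```python
# Simplified illustration of the postfix conversion; real input first
# passes through a tokenizer that handles quotes, macros and parens.
OPERATORS = {"OR", "AND", "IN", "==", "!=", ">", "<", "<=", ">="}

def to_postfix(tokens):
    stack, out = ["("], []
    for tok in tokens + [")"]:
        if tok == "(":
            stack.append(tok)
        elif tok == ")":
            while stack and stack[-1] != "(":
                out.append(stack.pop())
            stack.pop()  # discard the matching "("
        elif tok in OPERATORS:
            while stack and stack[-1] != "(":
                out.append(stack.pop())
            stack.append(tok)
        else:
            out.append(tok)
    return out

print(to_postfix('"GCC49" IN GCC5_TEST OR TRUE'.split()))
# ['"GCC49"', 'GCC5_TEST', 'IN', 'TRUE', 'OR']
```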
|
0.0
|
898d19216a5218dc3e907203befc7dc7818b8595
|
[
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_and_operation_conditional",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_in_conditional",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_invalid_conditional",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_or_operation_conditional"
] |
[
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParser::test_replace_boolean_constants",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParser::test_replace_macro_local_var_priority",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParser::test_replace_macro_using_dollarsign",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_conditional_ifdef",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_conditional_ifndef",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_bad_else",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_bad_endif",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_ands_ors",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_false_equals_zero",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_greater_than",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_greater_than_equal",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_hex_number",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_invalid_operators",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_less_than",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_less_than_equal",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_non_numerical",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_not_equals_true_false",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_reset",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_single_boolean",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_true_cannot_be_greater_than",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_true_cannot_be_greater_than_hex",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_true_equals_one",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_true_not_equals_false",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_conditional_variables",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_else",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_extra_tokens",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_process_garbage_input",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_elseif",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_ifdef",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_ifdef_dollarsign",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_ifndef",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_ifndef_dollarsign",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserConditionals::test_replace_macro_without_resolution",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserGuids::test_is_guid",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserGuids::test_parse_guid",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserVariables::test_replace_input_variables",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserVariables::test_replace_local_variables",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserPathAndFile::test_find_path",
"edk2toollib/uefi/edk2/parsers/base_parser_test.py::TestBaseParserPathAndFile::test_write_lines"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-28 19:07:16+00:00
|
bsd-2-clause
| 5,911 |
|
tillahoffmann__localscope-10
|
diff --git a/README.rst b/README.rst
index fee5d6c..b976dbc 100644
--- a/README.rst
+++ b/README.rst
@@ -20,7 +20,7 @@ Interactive python sessions, such as `Jupyter notebooks <https://jupyter.org/>`_
... print(a)
Traceback (most recent call last):
...
- ValueError: `a` is not a permitted global
+ localscope.LocalscopeException: `a` is not a permitted global (file "...", line 1, in print_a)
Motivation and detailed example
-------------------------------
@@ -65,7 +65,7 @@ This example may seem contrived. But unintended information leakage from the glo
... return sum(((x - y) / sigma) ** 2 for x, y in zip(xs, ys))
Traceback (most recent call last):
...
- ValueError: `sigma` is not a permitted global
+ localscope.LocalscopeException: `sigma` is not a permitted global (file "...", line 3, in <genexpr>)
Interface
---------
diff --git a/localscope/__init__.py b/localscope/__init__.py
index ff876da..bec61c5 100644
--- a/localscope/__init__.py
+++ b/localscope/__init__.py
@@ -16,7 +16,6 @@ def localscope(
predicate: Optional[Callable] = None,
allowed: Optional[Set[str]] = None,
allow_closure: bool = False,
- _globals: Optional[Dict[str, Any]] = None,
):
"""
Restrict the scope of a callable to local variables to avoid unintentional
@@ -27,8 +26,6 @@ def localscope(
predicate : Predicate to determine whether a global variable is allowed in the
scope. Defaults to allow any module.
allowed: Names of globals that are allowed to enter the scope.
- _globals : Globals associated with the root callable which are passed to
- dependent code blocks for analysis.
Attributes:
mfc: Decorator allowing *m*\\ odules, *f*\\ unctions, and *c*\\ lasses to enter
@@ -44,7 +41,8 @@ def localscope(
... print(a)
Traceback (most recent call last):
...
- ValueError: `a` is not a permitted global
+ localscope.LocalscopeException: `a` is not a permitted global (file "...",
+ line 1, in print_a)
The scope of a function can be extended by providing a list of allowed
exceptions.
@@ -85,53 +83,111 @@ def localscope(
blocks) at the time of declaration because static analysis has a minimal impact
on performance and it is easier to implement.
"""
- # Set defaults
- predicate = predicate or inspect.ismodule
+ # Set defaults and construct partial if the callable has not yet been provided for
+ # parameterized decorators, e.g., @localscope(allowed={"foo", "bar"}). This is a
+ # thin wrapper around the actual implementation `_localscope`. The wrapper
+ # reconstructs an informative traceback.
allowed = set(allowed) if allowed else set()
- if func is None:
+ predicate = predicate or inspect.ismodule
+ if not func:
return ft.partial(
localscope,
allow_closure=allow_closure,
- predicate=predicate,
allowed=allowed,
+ predicate=predicate,
)
+ return _localscope(
+ func,
+ allow_closure=allow_closure,
+ allowed=allowed,
+ predicate=predicate,
+ _globals={},
+ )
+
+
+class LocalscopeException(RuntimeError):
+ """
+ Raised when a callable tries to access a non-local variable.
+ """
+
+ def __init__(
+ self,
+ message: str,
+ code: types.CodeType,
+ instruction: Optional[dis.Instruction] = None,
+ ) -> None:
+ if instruction and instruction.starts_line:
+ lineno = instruction.starts_line
+ else:
+ lineno = code.co_firstlineno
+ details = f'file "{code.co_filename}", line {lineno}, in {code.co_name}'
+ super().__init__(f"{message} ({details})")
+
+
+def _localscope(
+ func: Union[types.FunctionType, types.CodeType],
+ *,
+ predicate: Callable,
+ allowed: Set[str],
+ allow_closure: bool,
+ _globals: Dict[str, Any],
+):
+ """
+ Args:
+ ...: Same as for the wrapper :func:`localscope`.
+ _globals : Globals associated with the root callable which are passed to
+ dependent code blocks for analysis.
+ """
+
+ # Extract global variables from a function
+ # (https://docs.python.org/3/library/types.html#types.FunctionType) or keep the
+ # explicitly provided globals for code objects
+ # (https://docs.python.org/3/library/types.html#types.CodeType).
if isinstance(func, types.FunctionType):
code = func.__code__
_globals = {**func.__globals__, **inspect.getclosurevars(func).nonlocals}
else:
code = func
- _globals = _globals or {}
- # Add function arguments to the list of allowed exceptions
+ # Add function arguments to the list of allowed exceptions.
allowed.update(code.co_varnames[: code.co_argcount])
- opnames = {"LOAD_GLOBAL"}
+ # Construct set of forbidden operations. The first accesses global variables. The
+ # second accesses variables from the outer scope.
+ forbidden_opnames = {"LOAD_GLOBAL"}
if not allow_closure:
- opnames.add("LOAD_DEREF")
+ forbidden_opnames.add("LOAD_DEREF")
LOGGER.info("analysing instructions for %s...", func)
for instruction in dis.get_instructions(code):
LOGGER.info(instruction)
name = instruction.argval
- if instruction.opname in opnames:
- # Explicitly allowed
+ if instruction.opname in forbidden_opnames:
+ # Variable explicitly allowed by name or in `builtins`.
if name in allowed or hasattr(builtins, name):
continue
- # Complain if the variable is not available
+ # Complain if the variable is not available.
if name not in _globals:
- raise NameError(f"`{name}` is not in globals")
- # Get the value of the variable and check it against the predicate
+ raise LocalscopeException(
+ f"`{name}` is not in globals", code, instruction
+ )
+ # Check if variable is allowed by value.
value = _globals[name]
if not predicate(value):
- raise ValueError(f"`{name}` is not a permitted global")
+ raise LocalscopeException(
+ f"`{name}` is not a permitted global", code, instruction
+ )
elif instruction.opname == "STORE_DEREF":
+ # Store a new allowed variable which has been created in the scope of the
+ # function.
allowed.add(name)
+
# Deal with code objects recursively after adding the current arguments to the
# allowed exceptions
for const in code.co_consts:
if isinstance(const, types.CodeType):
- localscope(
+ _localscope(
const,
_globals=_globals,
allow_closure=True,
|
tillahoffmann/localscope
|
fe4334355ea6e7bd1af0a15509b1f7a65f9da3b0
|
diff --git a/tests/test_localscope.py b/tests/test_localscope.py
index 41bc69c..232a966 100644
--- a/tests/test_localscope.py
+++ b/tests/test_localscope.py
@@ -1,4 +1,4 @@
-from localscope import localscope
+from localscope import localscope, LocalscopeException
import uuid
import pytest
@@ -16,15 +16,24 @@ def test_vanilla_function():
def test_missing_global():
- with pytest.raises(NameError):
+ def func():
+ return never_declared # noqa: F821
- @localscope
- def func():
- return never_ever_declared # noqa: F821
+ with pytest.raises(LocalscopeException, match="`never_declared` is not in globals"):
+ localscope(func)
+
+ # IMPORTANT! This function can be executed, but localscope complains because the
+ # global variable is not defined at the time when the function is analysed. This
+ # could be improved, but, most likely, one shouldn't write functions that rely on
+ # future globals in the first place.
+ """
+ never_declared = 123
+ assert func() == 123
+ """
def test_forbidden_global():
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`forbidden_global` is not a perm"):
@localscope
def return_forbidden_global():
@@ -57,7 +66,7 @@ def test_closure():
return return_forbidden_closure()
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`forbidden_closure` is not a perm"):
wrapper()
@@ -76,7 +85,7 @@ def test_allow_any_closure():
def test_allow_custom_predicate():
decorator = localscope(predicate=lambda x: isinstance(x, int))
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`forbidden_global` is not a perm"):
@decorator
def return_forbidden_global():
@@ -90,7 +99,7 @@ def test_allow_custom_predicate():
def test_comprehension():
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`integer_global` is not a perm"):
@localscope
def evaluate_mse(xs, ys): # missing argument integer_global
@@ -98,7 +107,7 @@ def test_comprehension():
def test_recursive():
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`forbidden_global` is not a perm"):
@localscope
def wrapper():
@@ -108,6 +117,17 @@ def test_recursive():
return return_forbidden_global()
+def test_recursive_without_call():
+ # We even raise an exception if we don't call a function. That's necessary because
+ # we can't trace all possible execution paths without actually running the function.
+ with pytest.raises(LocalscopeException, match="`forbidden_global` is not a perm"):
+
+ @localscope
+ def wrapper():
+ def return_forbidden_global():
+ return forbidden_global
+
+
def test_recursive_local_closure():
@localscope
def wrapper():
@@ -134,7 +154,7 @@ def test_mfc():
x = 1
- with pytest.raises(ValueError):
+ with pytest.raises(LocalscopeException, match="`x` is not a permitted"):
@localscope.mfc
def breakit():
|
Add hints for where the offending variable is used...
... to make debugging easier.
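One way to provide such hints is to read position metadata off the disassembled bytecode. A minimal sketch with a hypothetical `describe` helper (`Instruction.starts_line` as on Python 3.8-3.12):
```python
import dis

def describe(code, instruction=None):
    # Hypothetical helper: point at the offending instruction using the
    # code object's metadata.
    if instruction and instruction.starts_line:
        lineno = instruction.starts_line
    else:
        lineno = code.co_firstlineno
    return f'file "{code.co_filename}", line {lineno}, in {code.co_name}'

def func():
    return some_global  # noqa: F821

for ins in dis.get_instructions(func.__code__):
    if ins.opname == "LOAD_GLOBAL":
        print(describe(func.__code__, ins))
```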
|
0.0
|
fe4334355ea6e7bd1af0a15509b1f7a65f9da3b0
|
[
"[",
"[100%]",
"tests/test_localscope.py::test_vanilla_function",
"tests/test_localscope.py::test_missing_global",
"tests/test_localscope.py::test_forbidden_global",
"tests/test_localscope.py::test_builtin",
"tests/test_localscope.py::test_allowed",
"tests/test_localscope.py::test_closure",
"tests/test_localscope.py::test_allow_any_closure",
"tests/test_localscope.py::test_allow_custom_predicate",
"tests/test_localscope.py::test_comprehension",
"tests/test_localscope.py::test_recursive",
"tests/test_localscope.py::test_recursive_without_call",
"tests/test_localscope.py::test_recursive_local_closure",
"tests/test_localscope.py::test_mfc",
"tests/test_localscope.py::test_comprehension_with_argument",
"tests/test_localscope.py::test_comprehension_with_closure",
"tests/test_localscope.py::test_argument",
"tests/test_localscope.py::test_argument_with_closure",
"tests/test_localscope.py::test_local_deref"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-17 01:12:54+00:00
|
mit
| 5,912 |
|
tillahoffmann__util-2
|
diff --git a/util/__init__.py b/util/__init__.py
index c22bc98..ebf2052 100644
--- a/util/__init__.py
+++ b/util/__init__.py
@@ -1,2 +1,2 @@
-from sampling import *
-from plotting import *
\ No newline at end of file
+from .sampling import *
+from .plotting import *
\ No newline at end of file
diff --git a/util/sampling/__init__.py b/util/sampling/__init__.py
index e31a4c2..06d495d 100644
--- a/util/sampling/__init__.py
+++ b/util/sampling/__init__.py
@@ -1,3 +1,2 @@
from .metropolis import AdaptiveMetropolisSampler, MetropolisSampler
from .hamiltonian import HamiltonianSampler
-from .adaptive_rejection import AdaptiveRejectionSampler
diff --git a/util/sampling/adaptive_rejection.py b/util/sampling/adaptive_rejection.py
index d5d1b52..0125edc 100644
--- a/util/sampling/adaptive_rejection.py
+++ b/util/sampling/adaptive_rejection.py
@@ -25,7 +25,6 @@ class AdaptiveRejectionSampler:
lower and upper bound of the domain on which the distribution is supported. If `domain` is not given, the
domain is the positive real line.
"""
-
def __init__(self, fun, x0=None, args=None, jac=None, domain=None):
# Copy the function and the Jacobian
self.fun = fun
diff --git a/util/sampling/base.py b/util/sampling/base.py
index 970a28a..422f61e 100644
--- a/util/sampling/base.py
+++ b/util/sampling/base.py
@@ -13,7 +13,7 @@ class BaseSampler(object):
Parameters
----------
fun : callable
- log-posterior or log-likelihood function taking a vector of parameters as its first argument
+ negative log-posterior or log-likelihood function taking a vector of parameters as its first argument
args : array_like
additional arguments to pass to `fun`
parameter_names : list
@@ -62,7 +62,7 @@ class BaseSampler(object):
def grid_density_plot(self, burn_in=0, parameters=None, values=None, nrows=None, ncols=None, bins=10):
"""
- Plot the marginal densities of parameters (and vertical lines indicating the true values).
+ Plot the marginal densities of parameters (and vertical lines indicating the true values).
Parameters
----------
diff --git a/util/sampling/hamiltonian.py b/util/sampling/hamiltonian.py
index a9937cd..a96aa63 100644
--- a/util/sampling/hamiltonian.py
+++ b/util/sampling/hamiltonian.py
@@ -15,8 +15,8 @@ class HamiltonianSampler(BaseSampler):
Parameters
----------
fun : callable
- log-posterior or log-likelihood function taking a vector of parameters as its first argument and its derivative
- if `jac` is not given
+ negative log-posterior or log-likelihood function taking a vector of parameters as its first argument and its
+ derivative if `jac` is not given
args : array_like
additional arguments to pass to `fun`
parameter_names : list
@@ -36,6 +36,7 @@ class HamiltonianSampler(BaseSampler):
leapfrog_steps=10):
super(HamiltonianSampler, self).__init__(fun, args, parameter_names, break_on_interrupt)
self.jac = jac
+ # Load the mass matrix from disk if given
if isinstance(mass, str):
self.mass = np.loadtxt(mass)
else:
@@ -148,7 +149,7 @@ class HamiltonianSampler(BaseSampler):
for leapfrog_step in range(leapfrog_steps):
# Make a half step for the leapfrog algorithm
- momentum = momentum + 0.5 * epsilon * jac
+ momentum = momentum - 0.5 * epsilon * jac
# Update the position
if self.mass.ndim < 2:
parameters_end = parameters_end + epsilon * self.inv_mass * momentum
@@ -160,7 +161,7 @@ class HamiltonianSampler(BaseSampler):
else:
fun_value_end, jac = self.fun(parameters_end, *self.args)
# Make another half-step
- momentum = momentum + 0.5 * epsilon * jac
+ momentum = momentum - 0.5 * epsilon * jac
if full:
# Append parameters
@@ -182,7 +183,7 @@ class HamiltonianSampler(BaseSampler):
kinetic_end = self.evaluate_kinetic(momentum)
# Accept or reject the step
- if np.log(np.random.uniform()) < fun_value_end + kinetic_end - fun_value - kinetic:
+ if np.log(np.random.uniform()) < - fun_value_end + kinetic_end + fun_value - kinetic:
parameters = parameters_end
fun_value = fun_value_end
diff --git a/util/sampling/metropolis.py b/util/sampling/metropolis.py
index 8c51fcc..91ea47b 100644
--- a/util/sampling/metropolis.py
+++ b/util/sampling/metropolis.py
@@ -9,7 +9,7 @@ class MetropolisSampler(BaseSampler):
Parameters
----------
fun : callable
- log-posterior or log-likelihood function taking a vector of parameters as its first argument
+ negative log-posterior or log-likelihood function taking a vector of parameters as its first argument
proposal_covariance : array_like
covariance of the Gaussian proposal distribution
args : array_like
@@ -45,7 +45,7 @@ class MetropolisSampler(BaseSampler):
# Compute the function at the proposed sample
fun_proposal = self.fun(proposal, *self.args)
# Accept or reject the step
- if fun_proposal - fun_current > np.log(np.random.uniform()):
+ if fun_proposal - fun_current < np.log(np.random.uniform()):
# Update the log posterior and the parameter values
fun_current = fun_proposal
parameters = proposal
|
tillahoffmann/util
|
15ece9b7591b75bc9b59367f0349edaeb1c45eaf
|
diff --git a/tests/test_sampling.py b/tests/test_sampling.py
index 70d1e98..8e14777 100644
--- a/tests/test_sampling.py
+++ b/tests/test_sampling.py
@@ -12,17 +12,17 @@ for num_parameters in [1, 3, 5]:
covariance = np.diag(1 + np.random.gamma(1, size=num_parameters))
# Create a metropolis sampler
- sampler = sampling.MetropolisSampler(lambda x, mean=mean, covariance=covariance: log_gaussian(x, mean, covariance)[0],
+ sampler = sampling.MetropolisSampler(lambda x, mean=mean, covariance=covariance: -log_gaussian(x, mean, covariance)[0],
covariance / num_parameters)
params.append((mean, covariance, sampler))
# Create an adaptive metropolis sampler
- sampler = sampling.AdaptiveMetropolisSampler(lambda x, mean=mean, covariance=covariance: log_gaussian(x, mean, covariance)[0])
+ sampler = sampling.AdaptiveMetropolisSampler(lambda x, mean=mean, covariance=covariance: -log_gaussian(x, mean, covariance)[0])
params.append((mean, covariance, sampler))
# Create a Hamiltonian metropolis sampler
- sampler = sampling.HamiltonianSampler(lambda x, mean=mean, covariance=covariance: log_gaussian(x, mean, covariance)[0],
- jac=lambda x, mean=mean, covariance=covariance: log_gaussian(x, mean, covariance)[1],
+ sampler = sampling.HamiltonianSampler(lambda x, mean=mean, covariance=covariance: -log_gaussian(x, mean, covariance)[0],
+ jac=lambda x, mean=mean, covariance=covariance: -log_gaussian(x, mean, covariance)[1],
mass=covariance)
params.append((mean, covariance, sampler))
|
Use the negative log likelihood (or loss) for the samplers
To match the convention of machine-learning libraries that minimise a loss.
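A minimal sketch of the sign flip, assuming an unnormalized Gaussian log density as the target:
```python
import numpy as np

def log_gaussian(x, mean, cov):
    # Unnormalized multivariate Gaussian log density.
    d = np.asarray(x) - mean
    return -0.5 * d @ np.linalg.solve(cov, d)

# The samplers would now take the *negative* log posterior (a loss),
# matching the minimise-the-loss convention:
neg_log_posterior = lambda x, mean, cov: -log_gaussian(x, mean, cov)

mean, cov = np.zeros(2), np.eye(2)
print(neg_log_posterior(np.array([1.0, 0.0]), mean, cov))  # 0.5
```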
|
0.0
|
15ece9b7591b75bc9b59367f0349edaeb1c45eaf
|
[
"tests/test_sampling.py::test_sampling[mean0-covariance0-sampler0]",
"tests/test_sampling.py::test_sampling[mean1-covariance1-sampler1]",
"tests/test_sampling.py::test_sampling[mean2-covariance2-sampler2]",
"tests/test_sampling.py::test_sampling[mean3-covariance3-sampler3]",
"tests/test_sampling.py::test_sampling[mean4-covariance4-sampler4]",
"tests/test_sampling.py::test_sampling[mean5-covariance5-sampler5]",
"tests/test_sampling.py::test_sampling[mean6-covariance6-sampler6]",
"tests/test_sampling.py::test_sampling[mean7-covariance7-sampler7]",
"tests/test_sampling.py::test_sampling[mean8-covariance8-sampler8]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-11-08 18:52:16+00:00
|
mit
| 5,913 |
|
timeoutdigital__treehugger-63
|
diff --git a/HISTORY.rst b/HISTORY.rst
index f4be37f..8b242d6 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -9,6 +9,7 @@ Pending Release
* (Insert new release notes below this line)
* Add ``--json`` argument to ``print`` command.
+* Make 'edit' reuse previous encrypted values that weren't changed.
1.1.0 (2017-04-05)
------------------
diff --git a/treehugger/kms.py b/treehugger/kms.py
index 47cc221..bb02d68 100644
--- a/treehugger/kms.py
+++ b/treehugger/kms.py
@@ -11,6 +11,9 @@ from .ec2 import get_current_region
class KMSAgent(object):
+ def __init__(self):
+ self.cache = {}
+
key_id = 'alias/treehugger'
@property
@@ -29,9 +32,14 @@ class KMSAgent(object):
CiphertextBlob=cipher_blob,
EncryptionContext=encryption_context,
)
- return response['Plaintext'].decode('utf-8')
+ plaintext = response['Plaintext'].decode('utf-8')
+ self.cache[plaintext] = base64_ciphertext
+ return plaintext
def encrypt(self, plaintext, encryption_context):
+ if plaintext in self.cache:
+ return self.cache[plaintext]
+
response = self.kms_client.encrypt(
KeyId=self.key_id,
Plaintext=plaintext.encode('utf-8'),
|
timeoutdigital/treehugger
|
9a8de50a6f029c627dd82368bd335921ef8dc35a
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index c24ae65..1357704 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -191,11 +191,34 @@ class TestCLI:
'Plaintext': b'quux',
}
)
+
+ def fake_call(command):
+ assert len(command) == 2
+ assert command[0] == 'nano'
+ filename = command[1]
+ with open(filename, 'r') as fp:
+ obj = yaml.safe_load(fp.read())
+ assert obj == {
+ 'MY_ENCRYPTED_VAR': {'to_encrypt': 'quux'},
+ 'MY_UNENCRYPTED_VAR': 'bar',
+ 'TREEHUGGER_APP': 'baz',
+ 'TREEHUGGER_STAGE': 'qux',
+ }
+
+ with open(filename, 'w') as fp:
+ fp.write(textwrap.dedent('''\
+ MY_ENCRYPTED_VAR: {to_encrypt: quux2}
+ MY_UNENCRYPTED_VAR: bar
+ TREEHUGGER_APP: baz
+ TREEHUGGER_STAGE: qux
+ '''))
+ return 0
+
kms_stub.add_response(
'encrypt',
expected_params={
'KeyId': 'alias/treehugger',
- 'Plaintext': b'quux',
+ 'Plaintext': b'quux2',
'EncryptionContext': {
'treehugger_app': 'baz',
'treehugger_key': 'MY_ENCRYPTED_VAR',
@@ -207,6 +230,34 @@ class TestCLI:
'CiphertextBlob': b'foo',
}
)
+ with mock.patch.dict(os.environ, {'EDITOR': 'nano'}), mock.patch('subprocess.call', new=fake_call):
+ main(['edit', six.text_type(tmpfile)])
+
+ def test_edit_no_change(self, tmpdir, kms_stub):
+ tmpfile = tmpdir.join('test.yml')
+ encrypted_var = base64.b64encode(b'foo')
+ tmpfile.write(textwrap.dedent('''\
+ MY_ENCRYPTED_VAR:
+ encrypted: {encrypted_var}
+ MY_UNENCRYPTED_VAR: bar
+ TREEHUGGER_APP: baz
+ TREEHUGGER_STAGE: qux
+ '''.format(encrypted_var=encrypted_var.decode('utf-8'))))
+ kms_stub.add_response(
+ 'decrypt',
+ expected_params={
+ 'CiphertextBlob': b'foo',
+ 'EncryptionContext': {
+ 'treehugger_app': 'baz',
+ 'treehugger_key': 'MY_ENCRYPTED_VAR',
+ 'treehugger_stage': 'qux',
+ }
+ },
+ service_response={
+ 'KeyId': 'treehugger',
+ 'Plaintext': b'quux',
+ }
+ )
with mock.patch.dict(os.environ, {'EDITOR': 'nano'}), mock.patch('subprocess.call') as mock_call:
mock_call.return_value = 0
|
Make 'edit' reuse previous encrypted values that weren't changed
When editing a file, every `to_encrypt` value gets re-encrypted, which changes all the encrypted values, since the encrypted blob KMS produces appears to include a timestamp. This is less than ideal, as it means changing one password changes the whole file, leading to horrible diffs.
It would be nice if, when running 'edit', treehugger compared the before/after and reused the `encrypted` values for everything whose `to_encrypt` form hasn't changed. This could be done with a cache layer around the decrypt/encrypt calls to KMS, such that re-encrypting reuses the value found when decrypting for the edit.
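A minimal sketch of such a cache layer, mirroring the approach above; the boto3-style `decrypt`/`encrypt` calls are illustrative:
```python
import base64

class CachingKMSAgent:
    """Sketch: remember ciphertext seen at decrypt time and reuse it
    when the same plaintext is encrypted again, keeping diffs stable."""

    def __init__(self, kms_client, key_id="alias/treehugger"):
        self.kms_client = kms_client
        self.key_id = key_id
        self.cache = {}  # plaintext -> base64 ciphertext

    def decrypt(self, base64_ciphertext, encryption_context):
        response = self.kms_client.decrypt(
            CiphertextBlob=base64.b64decode(base64_ciphertext),
            EncryptionContext=encryption_context,
        )
        plaintext = response["Plaintext"].decode("utf-8")
        self.cache[plaintext] = base64_ciphertext
        return plaintext

    def encrypt(self, plaintext, encryption_context):
        if plaintext in self.cache:
            # Unchanged value: reuse the original ciphertext verbatim.
            return self.cache[plaintext]
        response = self.kms_client.encrypt(
            KeyId=self.key_id,
            Plaintext=plaintext.encode("utf-8"),
            EncryptionContext=encryption_context,
        )
        return base64.b64encode(response["CiphertextBlob"]).decode("utf-8")
```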
|
0.0
|
9a8de50a6f029c627dd82368bd335921ef8dc35a
|
[
"tests/test_cli.py::TestCLI::test_edit_no_change"
] |
[
"tests/test_cli.py::TestCLI::test_edit",
"tests/test_cli.py::TestCLI::test_help",
"tests/test_cli.py::TestCLI::test_print_json",
"tests/test_cli.py::TestCLI::test_print_single_line",
"tests/test_cli.py::TestCLI::test_exec_no_command_but_dashes",
"tests/test_cli.py::TestCLI::test_print_file",
"tests/test_cli.py::TestCLI::test_exec_no_command",
"tests/test_cli.py::TestCLI::test_print_only_unencrypted",
"tests/test_cli.py::TestCLI::test_print_json_single_line",
"tests/test_cli.py::TestCLI::test_print_no_var_with_quote",
"tests/test_cli.py::TestCLI::test_exec_file"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-08-31 09:44:08+00:00
|
isc
| 5,914 |
|
timmahrt__praatIO-31
|
diff --git a/examples/add_tiers.py b/examples/add_tiers.py
index ef90f73..9b06542 100644
--- a/examples/add_tiers.py
+++ b/examples/add_tiers.py
@@ -13,6 +13,7 @@ if not os.path.exists(outputPath):
os.mkdir(outputPath)
tgPhones = tgio.openTextgrid(join(path, "bobby_phones.TextGrid"))
+elanTgPhones = tgio.openTextgrid(join(path, "bobby_phones_elan.TextGrid"))
tgWords = tgio.openTextgrid(join(path, "bobby_words.TextGrid"))
tgPhones.addTier(tgWords.tierDict["word"])
diff --git a/examples/files/bobby_phones_elan.TextGrid b/examples/files/bobby_phones_elan.TextGrid
new file mode 100644
index 0000000..63843ab
--- /dev/null
+++ b/examples/files/bobby_phones_elan.TextGrid
@@ -0,0 +1,74 @@
+File type = "ooTextFile"
+Object class = "TextGrid"
+
+xmin = 0.0
+xmax = 1.194625
+tiers? <exists>
+size = 1
+item []:
+ item[1]:
+ class = "IntervalTier"
+ name = "phone"
+ xmin = 0.0
+ xmax = 1.18979591837
+ intervals: size = 15
+ intervals [1]
+ xmin = 0.0124716553288
+ xmax = 0.06469123242311078
+ text = ""
+ intervals [2]
+ xmin = 0.06469123242311078
+ xmax = 0.08438971390281873
+ text = "B"
+ intervals [3]
+ xmin = 0.08438971390281873
+ xmax = 0.23285789838876556
+ text = "AA1"
+ intervals [4]
+ xmin = 0.23285789838876556
+ xmax = 0.2788210218414174
+ text = "B"
+ intervals [5]
+ xmin = 0.2788210218414174
+ xmax = 0.41156462585
+ text = "IY0"
+ intervals [6]
+ xmin = 0.41156462585
+ xmax = 0.47094510353588265
+ text = "R"
+ intervals [7]
+ xmin = 0.47094510353588265
+ xmax = 0.521315192744
+ text = "IH1"
+ intervals [8]
+ xmin = 0.521315192744
+ xmax = 0.658052967538796
+ text = "PT"
+ intervals [9]
+ xmin = 0.658052967538796
+ xmax = 0.680952380952
+ text = "DH"
+ intervals [10]
+ xmin = 0.680952380952
+ xmax = 0.740816326531
+ text = "AH0"
+ intervals [11]
+ xmin = 0.740816326531
+ xmax = 0.807647261005538
+ text = "L"
+ intervals [12]
+ xmin = 0.807647261005538
+ xmax = 0.910430839002
+ text = "EH1"
+ intervals [13]
+ xmin = 0.910430839002
+ xmax = 0.980272108844
+ text = "JH"
+ intervals [14]
+ xmin = 0.980272108844
+ xmax = 1.1171482864527198
+ text = "ER0"
+ intervals [15]
+ xmin = 1.1171482864527198
+ xmax = 1.18979591837
+ text = ""
diff --git a/examples/files/bobby_words_with_newlines_longfile_elan.TextGrid b/examples/files/bobby_words_with_newlines_longfile_elan.TextGrid
new file mode 100644
index 0000000..f1fcdfa
--- /dev/null
+++ b/examples/files/bobby_words_with_newlines_longfile_elan.TextGrid
@@ -0,0 +1,82 @@
+File type = "ooTextFile"
+Object class = "TextGrid"
+
+xmin = 0
+xmax = 1.194625
+tiers? <exists>
+size = 3
+item []:
+ item[1]:
+ class = "IntervalTier"
+ name = """word"""
+ xmin = 0
+ xmax = 1.194625
+ intervals: size = 6
+ intervals [1]
+ xmin = 0
+ xmax = 0.06469123242311078
+ text = ""
+ intervals [2]
+ xmin = 0.06469123242311078
+ xmax = 0.41156462585
+ text = """""""BOBBY""""""
+Noun"
+ intervals [3]
+ xmin = 0.41156462585
+ xmax = 0.6576881808447274
+ text = "RIPPED
+Verb"
+ intervals [4]
+ xmin = 0.6576881808447274
+ xmax = 0.740816326531
+ text = "THE
+Determiner"
+ intervals [5]
+ xmin = 0.740816326531
+ xmax = 1.1171482864527198
+ text = "LEDGER
+Noun"
+ intervals [6]
+ xmin = 1.1171482864527198
+ xmax = 1.194625
+ text = ""
+ item[2]:
+ class = "IntervalTier"
+ name = "phrase"
+ xmin = 0
+ xmax = 1.194625
+ intervals: size = 3
+ intervals [1]
+ xmin = 0
+ xmax = 0.06469123242311078
+ text = ""
+ intervals [2]
+ xmin = 0.06469123242311078
+ xmax = 1.1171482864527198
+ text = "BOBBY RIPPED THE LEDGER"
+ intervals [3]
+ xmin = 1.1171482864527198
+ xmax = 1.194625
+ text = ""
+ item[3]:
+ class = "TextTier"
+ name = ""
+ xmin = 0
+ xmax = 1.194625
+ points: size = 4
+ points [1]
+ number = 0.23290458517889742
+ mark = "133
+p1"
+ points [2]
+ number = 0.5304541883551366
+ mark = "0
+p2"
+ points [3]
+ number = 0.6966964767693916
+ mark = "93
+p3"
+ points [4]
+ number = 0.9231714783772174
+ mark = "85
+p4"
diff --git a/praatio/tgio.py b/praatio/tgio.py
index e5f0afc..aa64649 100644
--- a/praatio/tgio.py
+++ b/praatio/tgio.py
@@ -1861,7 +1861,7 @@ def openTextgrid(fnFullPath, readRaw=False, readAsJson=False):
data = data.replace("\r\n", "\n")
caseA = "ooTextFile short" in data
- caseB = "item [" not in data
+ caseB = not re.search(r"item ?\[", data)
if caseA or caseB:
textgrid = _parseShortTextgrid(data)
else:
@@ -1883,7 +1883,7 @@ def _parseNormalTextgrid(data):
newTG = Textgrid()
# Toss textgrid header
- header, data = data.split("item [", 1)
+ header, data = re.split(r'item ?\[', data, maxsplit=1, flags=re.MULTILINE)
headerList = header.split("\n")
tgMin = float(headerList[3].split("=")[1].strip())
@@ -1893,59 +1893,58 @@ def _parseNormalTextgrid(data):
newTG.maxTimestamp = tgMax
# Process each tier individually (will be output to separate folders)
- tierList = data.split("item [")[1:]
+ tierList = re.split(r"item ?\[", data, flags=re.MULTILINE)[1:]
for tierTxt in tierList:
hasData = True
if 'class = "IntervalTier"' in tierTxt:
tierType = INTERVAL_TIER
- searchWord = "intervals ["
+ searchWord = r"intervals ?\["
else:
tierType = POINT_TIER
- searchWord = "points ["
+ searchWord = r"points ?\["
# Get tier meta-information
try:
- header, tierData = tierTxt.split(searchWord, 1)
+ d = re.split(searchWord, tierTxt, flags=re.MULTILINE)
+ header, tierData = d[0], d[1:]
except ValueError:
# A tier with no entries
- if "size = 0" in tierTxt:
+ if re.search(r"size ?= ?0", tierTxt):
header = tierTxt
tierData = ""
hasData = False
else:
raise
- tierName = header.split("name = ")[1].split("\n", 1)[0]
- tierName, tierNameI = _fetchTextRow(header, 0, "name = ")
- tierStart = header.split("xmin = ")[1].split("\n", 1)[0]
+ tierName = re.search(r"name ?= ?\"(.*)\"\s*$", header, flags=re.MULTILINE).groups()[0]
+ tierName = re.sub(r'""', '"', tierName)
+
+ tierStart = re.search(r"xmin ?= ?([\d.]+)\s*$", header, flags=re.MULTILINE).groups()[0]
tierStart = strToIntOrFloat(tierStart)
- tierEnd = header.split("xmax = ")[1].split("\n", 1)[0]
+
+ tierEnd = re.search(r"xmax ?= ?([\d.]+)\s*$", header, flags=re.MULTILINE).groups()[0]
tierEnd = strToIntOrFloat(tierEnd)
# Get the tier entry list
tierEntryList = []
labelI = 0
if tierType == INTERVAL_TIER:
- while True:
- try:
- timeStart, timeStartI = _fetchRow(tierData, labelI, "xmin = ")
- timeEnd, timeEndI = _fetchRow(tierData, timeStartI, "xmax = ")
- label, labelI = _fetchTextRow(tierData, timeEndI, "text = ")
- except (ValueError, IndexError):
- break
+ for element in tierData:
+ timeStart = re.search(r"xmin ?= ?([\d.]+)\s*$", element, flags=re.MULTILINE).groups()[0]
+ timeEnd = re.search(r"xmax ?= ?([\d.]+)\s*$", element, flags=re.MULTILINE).groups()[0]
+ label = re.search(r"text ?= ?\"(.*)\"\s*$", element, flags=re.MULTILINE|re.DOTALL).groups()[0]
label = label.strip()
+ label = re.sub(r'""', '"', label)
tierEntryList.append((timeStart, timeEnd, label))
tier = IntervalTier(tierName, tierEntryList, tierStart, tierEnd)
- else:
- while True:
- try:
- time, timeI = _fetchRow(tierData, labelI, "number = ")
- label, labelI = _fetchTextRow(tierData, timeI, "mark = ")
- except (ValueError, IndexError):
- break
+
+ else:
+ for element in tierData:
+ time = re.search(r"number ?= ?([\d.]+)\s*$", element, flags=re.MULTILINE).groups()[0]
+ label = re.search(r"mark ?= ?\"(.*)\"\s*$", element, flags=re.MULTILINE|re.DOTALL).groups()[0]
label = label.strip()
tierEntryList.append((time, label))
tier = PointTier(tierName, tierEntryList, tierStart, tierEnd)
|
timmahrt/praatIO
|
056525d1f5a2c6e6337b64ea15134e571a231a4c
|
diff --git a/examples/test/io_tests.py b/examples/test/io_tests.py
index d309c1b..60844bb 100644
--- a/examples/test/io_tests.py
+++ b/examples/test/io_tests.py
@@ -110,6 +110,15 @@ class IOTests(unittest.TestCase):
self.assertTrue(areTheSame(inputFN, outputFN, readFile))
+ fn = "bobby_words_with_newlines_longfile_elan.TextGrid"
+ elanInputFN = join(self.dataRoot, fn)
+ elanOutputFN = join(self.outputRoot, fn)
+
+ tg = tgio.openTextgrid(elanInputFN)
+ tg.save(elanOutputFN, useShortForm=False)
+
+ self.assertTrue(areTheSame(inputFN, elanOutputFN, readFile))
+
def test_tg_io(self):
"""Tests for reading/writing textgrid io"""
fn = "textgrid_to_merge.TextGrid"
|
Issues parsing TextGrids from ELAN
I've had a couple of users reporting issues with loading TextGrids exported from ELAN. The issue seems to be that the "item [1]" lines are formatted without a space ("item[1]"), so the parsing in https://github.com/timmahrt/praatIO/blob/master/praatio/tgio.py#L1896 fails. I think a reasonable fix would be something like `re.split(r'item ?\[', data, flags=re.MULTILINE)[1:]`.
Looks like you're working on a 5.0, so I don't know whether that would be the place to fix it, or whether it would be better for me to submit a PR against the main branch.
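A quick runnable check of the proposed pattern against both spellings (sample data invented for illustration):
```python
import re

data = (
    'item[1]:\n'    # ELAN export: no space before "["
    '    class = "IntervalTier"\n'
    'item [2]:\n'   # Praat long form: with space
    '    class = "TextTier"\n'
)
tiers = re.split(r"item ?\[", data, flags=re.MULTILINE)[1:]
print(len(tiers))  # 2 -- both spellings are split on
```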
|
0.0
|
056525d1f5a2c6e6337b64ea15134e571a231a4c
|
[
"examples/test/io_tests.py::IOTests::test_reading_long_textgrids_with_newlines_in_labels"
] |
[
"examples/test/io_tests.py::IOTests::test_duration_tier_io",
"examples/test/io_tests.py::IOTests::test_get_audio_duration",
"examples/test/io_tests.py::IOTests::test_kg_io",
"examples/test/io_tests.py::IOTests::test_pitch_io",
"examples/test/io_tests.py::IOTests::test_pitch_io_long_vs_short",
"examples/test/io_tests.py::IOTests::test_point_process_io",
"examples/test/io_tests.py::IOTests::test_point_process_io_long_vs_short",
"examples/test/io_tests.py::IOTests::test_reading_textgrids_with_newlines_in_labels",
"examples/test/io_tests.py::IOTests::test_save",
"examples/test/io_tests.py::IOTests::test_save_with_force_larger_value_as_maximum_time",
"examples/test/io_tests.py::IOTests::test_save_with_force_too_large_minimum_time",
"examples/test/io_tests.py::IOTests::test_save_with_force_zero_as_minimum_time",
"examples/test/io_tests.py::IOTests::test_save_with_ignore_blank_sections",
"examples/test/io_tests.py::IOTests::test_save_with_minimum_interval_length",
"examples/test/io_tests.py::IOTests::test_save_with_minimum_time_stamp",
"examples/test/io_tests.py::IOTests::test_saving_and_loading_json",
"examples/test/io_tests.py::IOTests::test_saving_long_textgrid",
"examples/test/io_tests.py::IOTests::test_saving_short_textgrid",
"examples/test/io_tests.py::IOTests::test_tg_io",
"examples/test/io_tests.py::IOTests::test_tg_io_long_vs_short"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-01 16:15:40+00:00
|
mit
| 5,915 |
|
timvink__mkdocs-git-revision-date-localized-plugin-85
|
diff --git a/docs/available-variables.md b/docs/available-variables.md
index 168bbb0..6843a92 100644
--- a/docs/available-variables.md
+++ b/docs/available-variables.md
@@ -32,11 +32,13 @@ To allow for more flexibility when overriding a theme there are also variables f
- `page.meta.git_revision_date_localized_raw_iso_date`
- `page.meta.git_revision_date_localized_raw_iso_datetime`
- `page.meta.git_revision_date_localized_raw_timeago`
+- `page.meta.git_revision_date_localized_raw_custom`
- `page.meta.git_site_revision_date_localized_raw_datetime`
- `page.meta.git_site_revision_date_localized_raw_iso_date`
- `page.meta.git_site_revision_date_localized_raw_date`
- `page.meta.git_site_revision_date_localized_raw_iso_datetime`
- `page.meta.git_site_revision_date_localized_raw_timeago`
+- `page.meta.git_site_revision_date_localized_raw_custom`
And if you've enabled creation date in the config:
@@ -45,6 +47,7 @@ And if you've enabled creation date in the config:
- `page.meta.git_creation_date_localized_raw_iso_date`
- `page.meta.git_creation_date_localized_raw_iso_datetime`
- `page.meta.git_creation_date_localized_raw_timeago`
+- `page.meta.git_creation_date_localized_raw_custom`
!!! warning "timeago.js dependency"
diff --git a/docs/options.md b/docs/options.md
index 2f87302..666ae87 100644
--- a/docs/options.md
+++ b/docs/options.md
@@ -8,6 +8,7 @@ You can customize the plugin by setting options in `mkdocs.yml`. For example:
plugins:
- git-revision-date-localized:
type: timeago
+ custom_format: "%d. %B %Y"
timezone: Europe/Amsterdam
locale: en
fallback_to_build_date: false
@@ -19,7 +20,7 @@ You can customize the plugin by setting options in `mkdocs.yml`. For example:
## `type`
-Default is `date`. The format of the date to be displayed. Valid values are `date`, `datetime`, `iso_date`, `iso_datetime` and `timeago`. Example outputs:
+Default is `date`. The format of the date to be displayed. Valid values are `date`, `datetime`, `iso_date`, `iso_datetime`, `timeago` and `custom`. Example outputs:
```yaml
November 28, 2019 # type: date (default)
@@ -27,8 +28,13 @@ November 28, 2019 13:57:28 # type: datetime
2019-11-28 # type: iso_date
2019-11-28 13:57:26 # type: iso_datetime
20 hours ago # type: timeago
+28. November 2019 # type: custom
```
+## `custom_format`
+
+Default is `%d. %B %Y`. The date format used when `type: custom`. Passed to python's `strftime`, see the [cheatsheat](https://strftime.org/) for details.
+
## `timezone`
Default is `UTC`. Specify a time zone database name ([reference](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)). This option is especially relevant when using `type: datetime` and `type: iso_datetime`. Note that when using [timeago](http://timeago.yarp.com/) (with `type: timeago`) any difference in time zones between server and client will be handled automatically.
diff --git a/mkdocs_git_revision_date_localized_plugin/plugin.py b/mkdocs_git_revision_date_localized_plugin/plugin.py
index 9a3bdb3..9d609fc 100644
--- a/mkdocs_git_revision_date_localized_plugin/plugin.py
+++ b/mkdocs_git_revision_date_localized_plugin/plugin.py
@@ -37,6 +37,7 @@ class GitRevisionDateLocalizedPlugin(BasePlugin):
("fallback_to_build_date", config_options.Type(bool, default=False)),
("locale", config_options.Type(str, default=None)),
("type", config_options.Type(str, default="date")),
+ ("custom_format", config_options.Type(str, default="%d. %B %Y")),
("timezone", config_options.Type(str, default="UTC")),
("exclude", config_options.Type(list, default=[])),
("enable_creation_date", config_options.Type(bool, default=False)),
@@ -61,7 +62,7 @@ class GitRevisionDateLocalizedPlugin(BasePlugin):
if not self.config.get('enabled'):
return config
- assert self.config['type'] in ["date","datetime","iso_date","iso_datetime","timeago"]
+ assert self.config['type'] in ["date","datetime","iso_date","iso_datetime","timeago","custom"]
self.util = Util(config=self.config)
@@ -195,7 +196,6 @@ class GitRevisionDateLocalizedPlugin(BasePlugin):
assert len(locale) == 2, "locale must be a 2 letter code, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes"
-
# Retrieve git commit timestamp
last_revision_timestamp = self.util.get_git_commit_timestamp(
path=page.file.abs_src_path,
diff --git a/mkdocs_git_revision_date_localized_plugin/util.py b/mkdocs_git_revision_date_localized_plugin/util.py
index 8fbf847..c93a5bd 100644
--- a/mkdocs_git_revision_date_localized_plugin/util.py
+++ b/mkdocs_git_revision_date_localized_plugin/util.py
@@ -46,7 +46,7 @@ class Util:
@staticmethod
def _date_formats(
- unix_timestamp: float, locale: str = "en", time_zone: str = "UTC"
+ unix_timestamp: float, locale: str = "en", time_zone: str = "UTC", custom_format: str = "%d. %B %Y"
) -> Dict[str, Any]:
"""
Calculate different date formats / types.
@@ -55,6 +55,7 @@ class Util:
unix_timestamp (float): A timestamp in seconds since 1970.
locale (str): Locale code of language to use. Defaults to 'en'.
time_zone (str): Timezone database name (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
+ custom_format (str): strftime format specifier for the 'custom' type
Returns:
dict: Different date formats.
@@ -77,8 +78,8 @@ class Util:
),
"iso_date": loc_revision_date.strftime("%Y-%m-%d"),
"iso_datetime": loc_revision_date.strftime("%Y-%m-%d %H:%M:%S"),
- "timeago": '<span class="timeago" datetime="%s" locale="%s"></span>'
- % (loc_revision_date.isoformat(), locale),
+ "timeago": '<span class="timeago" datetime="%s" locale="%s"></span>' % (loc_revision_date.isoformat(), locale),
+ "custom": loc_revision_date.strftime(custom_format),
}
def get_git_commit_timestamp(
@@ -195,7 +196,8 @@ class Util:
date_formats = self._date_formats(
unix_timestamp=commit_timestamp,
time_zone=self.config.get("timezone"),
- locale=locale
+ locale=locale,
+ custom_format=self.config.get('custom_format')
)
if add_spans:
date_formats = self.add_spans(date_formats)
|
timvink/mkdocs-git-revision-date-localized-plugin
|
1efdd246c3f44fdfd30cf747d426883d76359126
|
diff --git a/tests/fixtures/basic_project/mkdocs_custom_type.yml b/tests/fixtures/basic_project/mkdocs_custom_type.yml
new file mode 100644
index 0000000..7b57d11
--- /dev/null
+++ b/tests/fixtures/basic_project/mkdocs_custom_type.yml
@@ -0,0 +1,8 @@
+site_name: test gitrevisiondatelocalized_plugin
+use_directory_urls: true
+
+plugins:
+ - search
+ - git-revision-date-localized:
+ type: custom
+ custom_format: "%Y"
\ No newline at end of file
diff --git a/tests/test_builds.py b/tests/test_builds.py
index 320f1ac..fe338ff 100644
--- a/tests/test_builds.py
+++ b/tests/test_builds.py
@@ -300,6 +300,7 @@ MKDOCS_FILES = [
'basic_project/mkdocs_theme_timeago_instant.yml',
'basic_project/mkdocs_exclude.yml',
'basic_project/mkdocs_meta.yml',
+ 'basic_project/mkdocs_custom_type.yml',
# 'i18n/mkdocs.yml'
]
@@ -322,6 +323,7 @@ def test_date_formats():
"iso_date": "2020-02-22",
"iso_datetime": "2020-02-22 18:52:09",
"timeago": '<span class="timeago" datetime="2020-02-22T18:52:09+00:00" locale="en"></span>',
+ "custom": '22. February 2020',
}
@@ -363,9 +365,12 @@ def test_tags_are_replaced(tmp_path, mkdocs_file):
# Assert {{ git_revision_date_localized }} is replaced
date_formats_revision_date = Util()._date_formats(1642911026,
locale=plugin_config.get("locale"),
- time_zone=plugin_config.get("timezone"))
+ time_zone=plugin_config.get("timezone"),
+ custom_format=plugin_config.get("custom_format")
+ )
for k, v in date_formats_revision_date.items():
assert v is not None
+
date = date_formats_revision_date.get(plugin_config.get('type'))
assert re.search(rf"{date}\<\/span.+", contents)
@@ -373,7 +378,9 @@ def test_tags_are_replaced(tmp_path, mkdocs_file):
# Assert {{ git_site_revision_date_localized }} is replaced
date_formats_revision_date = Util()._date_formats(1643911026,
locale=plugin_config.get("locale"),
- time_zone=plugin_config.get("timezone"))
+ time_zone=plugin_config.get("timezone"),
+ custom_format=plugin_config.get("custom_format")
+ )
for k, v in date_formats_revision_date.items():
assert v is not None
date = date_formats_revision_date.get(plugin_config.get('type'))
@@ -384,7 +391,9 @@ def test_tags_are_replaced(tmp_path, mkdocs_file):
# The creation of page_with_tag.md was set in setup_commit_history to 1500854705 ( Mon Jul 24 2017 00:05:05 GMT+0000 )
date_formats_revision_date = Util()._date_formats(1500854705,
locale=plugin_config.get("locale"),
- time_zone=plugin_config.get("timezone"))
+ time_zone=plugin_config.get("timezone"),
+ custom_format=plugin_config.get("custom_format")
+ )
for k, v in date_formats_revision_date.items():
assert v is not None
date = date_formats_revision_date.get(plugin_config.get('type'))
|
Option to set your own date format separate from the locale
It is surprising that there isn't a setting for specifying your own date format.
The only current way is the `locale` option, which has unwanted side effects such as altering the displayed name of the month.
The reason such an option should exist is simply that not everyone uses the date format commonly associated with a given language.
For example, I use English as my site's primary language while preferring the German date format (`01. January 2022`) over the English one (`January 01, 2022`).
This causes an issue: the plugin uses the English format while I would like the German one. Changing the locale to `de` would turn `January` into `Januar`, which conflicts with my site's main language.
I would really appreciate it if such a setting could be added.
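A minimal sketch of the requested behaviour: with an explicit `strftime` format the layout is German while the month name stays English (assuming the default C locale), which the `locale` option alone cannot achieve.
```python
from datetime import datetime

revision = datetime(2022, 1, 1)
print(revision.strftime("%d. %B %Y"))  # 01. January 2022  (German layout)
print(revision.strftime("%B %d, %Y"))  # January 01, 2022  (English layout)
```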
|
0.0
|
1efdd246c3f44fdfd30cf747d426883d76359126
|
[
"tests/test_builds.py::test_date_formats"
] |
[
"tests/test_builds.py::test_git_not_available"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-02-21 09:50:34+00:00
|
mit
| 5,916 |
|
timvink__mkdocs-table-reader-plugin-10
|
diff --git a/mkdocs_table_reader_plugin/safe_eval.py b/mkdocs_table_reader_plugin/safe_eval.py
index 092ee37..71f8959 100644
--- a/mkdocs_table_reader_plugin/safe_eval.py
+++ b/mkdocs_table_reader_plugin/safe_eval.py
@@ -57,7 +57,7 @@ def safe_eval(string):
def parse_argkwarg(string: str):
"""
- Parses a string to detech both args and kwargs.
+ Parses a string to detect both args and kwargs.
Adapted code from
https://stackoverflow.com/questions/9305387/string-of-kwargs-to-kwargs
@@ -69,12 +69,13 @@ def parse_argkwarg(string: str):
args[List], kwargs[Dict]
"""
- argkwargs = re.split(r"(?<!\=)(?:,? )(?!\=)", string)
+ argkwargs = re.split(r"(?<!\=)(?:,{1} )(?!\=)", string)
args = []
kwargs = []
for i in argkwargs:
+ i = i.strip()
if "=" in i:
kwargs.append(i)
else:
diff --git a/setup.py b/setup.py
index 3e63940..da81650 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setup(
name="mkdocs-table-reader-plugin",
- version="0.4.0",
+ version="0.4.1",
description="MkDocs plugin to directly insert tables from files into markdown.",
long_description=long_description,
long_description_content_type="text/markdown",
|
timvink/mkdocs-table-reader-plugin
|
31f80de456e36005d4dcb5d610cb5ec2e89cabf7
|
diff --git a/tests/fixtures/data_path_with_space/docs/folder with spaces/basic_table.csv b/tests/fixtures/data_path_with_space/docs/folder with spaces/basic_table.csv
new file mode 100644
index 0000000..6743b2e
--- /dev/null
+++ b/tests/fixtures/data_path_with_space/docs/folder with spaces/basic_table.csv
@@ -0,0 +1,5 @@
+"a","b"
+40,73
+50,52
+531456,80
+"name","table1"
\ No newline at end of file
diff --git a/tests/fixtures/data_path_with_space/docs/index.md b/tests/fixtures/data_path_with_space/docs/index.md
new file mode 100644
index 0000000..8f4f35c
--- /dev/null
+++ b/tests/fixtures/data_path_with_space/docs/index.md
@@ -0,0 +1,7 @@
+# Test page
+
+This is a table that we load from the docs folder, because we set `data_path` to `docs`:
+
+## inserted with positional argument
+
+{{ read_csv("basic_table.csv") }}
diff --git a/tests/fixtures/data_path_with_space/mkdocs.yml b/tests/fixtures/data_path_with_space/mkdocs.yml
new file mode 100644
index 0000000..cd69053
--- /dev/null
+++ b/tests/fixtures/data_path_with_space/mkdocs.yml
@@ -0,0 +1,7 @@
+site_name: test git_table_reader site
+use_directory_urls: true
+
+plugins:
+ - search
+ - table-reader:
+ data_path: "docs/folder with spaces"
diff --git a/tests/fixtures/table_path_with_space/docs/index.md b/tests/fixtures/table_path_with_space/docs/index.md
new file mode 100644
index 0000000..6edd75d
--- /dev/null
+++ b/tests/fixtures/table_path_with_space/docs/index.md
@@ -0,0 +1,7 @@
+# Test page
+
+This is a table that we load from the docs folder, because we set `data_path` to `docs`:
+
+## inserted with positional argument
+
+{{ read_csv("docs/table name with spaces.csv") }}
diff --git a/tests/fixtures/table_path_with_space/docs/table name with spaces.csv b/tests/fixtures/table_path_with_space/docs/table name with spaces.csv
new file mode 100644
index 0000000..6743b2e
--- /dev/null
+++ b/tests/fixtures/table_path_with_space/docs/table name with spaces.csv
@@ -0,0 +1,5 @@
+"a","b"
+40,73
+50,52
+531456,80
+"name","table1"
\ No newline at end of file
diff --git a/tests/fixtures/table_path_with_space/mkdocs.yml b/tests/fixtures/table_path_with_space/mkdocs.yml
new file mode 100644
index 0000000..1e47be9
--- /dev/null
+++ b/tests/fixtures/table_path_with_space/mkdocs.yml
@@ -0,0 +1,6 @@
+site_name: test git_table_reader site
+use_directory_urls: true
+
+plugins:
+ - search
+ - table-reader
diff --git a/tests/test_build.py b/tests/test_build.py
index 9ea90a9..6da893d 100644
--- a/tests/test_build.py
+++ b/tests/test_build.py
@@ -189,6 +189,36 @@ def test_datapath_trailing(tmp_path):
assert re.search(r"539956", contents)
+def test_datapath_with_spaces(tmp_path):
+
+ tmp_proj = setup_clean_mkdocs_folder(
+ "tests/fixtures/data_path_with_space/mkdocs.yml", tmp_path
+ )
+
+ result = build_docs_setup(tmp_proj)
+ assert result.exit_code == 0, "'mkdocs build' command failed"
+
+ # Make sure the basic_table.csv is inserted
+ page_with_tag = tmp_proj / "site/index.html"
+ contents = page_with_tag.read_text()
+ assert re.search(r"531456", contents)
+
+
+def test_tablepath_with_spaces(tmp_path):
+
+ tmp_proj = setup_clean_mkdocs_folder(
+ "tests/fixtures/table_path_with_space/mkdocs.yml", tmp_path
+ )
+
+ result = build_docs_setup(tmp_proj)
+ assert result.exit_code == 0, "'mkdocs build' command failed"
+
+ # Make sure the basic_table.csv is inserted
+ page_with_tag = tmp_proj / "site/index.html"
+ contents = page_with_tag.read_text()
+ assert re.search(r"531456", contents)
+
+
def test_wrong_path(tmp_path):
tmp_proj = setup_clean_mkdocs_folder(
diff --git a/tests/test_safe_eval.py b/tests/test_safe_eval.py
index 172f4c2..15bcb09 100644
--- a/tests/test_safe_eval.py
+++ b/tests/test_safe_eval.py
@@ -33,7 +33,7 @@ def test_safe_eval5():
def test_parseargkwarg_1():
- s = "title='bah', name='john' purple='haze' none=None i=1"
+ s = "title='bah', name='john', purple='haze', none=None, i=1"
args, kwargs = parse_argkwarg(s)
assert args == []
assert kwargs == {
@@ -73,6 +73,20 @@ def test_parseargkwarg_5():
assert kwargs == {"sep": "\r\t"}
+def test_parseargkwarg_6():
+ s = "'assets/tables/table.csv' , sep = '\r\t'"
+ args, kwargs = parse_argkwarg(s)
+ assert args == ["assets/tables/table.csv"]
+ assert kwargs == {"sep": "\r\t"}
+
+
+def test_parseargkwarg_7():
+ s = "'table with space.csv', sep = '\r\t'"
+ args, kwargs = parse_argkwarg(s)
+ assert args == ["table with space.csv"]
+ assert kwargs == {"sep": "\r\t"}
+
+
def test_parseargkwarg_error():
with pytest.raises(AssertionError):
|
Whitespace in csv path breaks MkDocs build process
Given an abspath, it works fine if there is no whitespace in the path. With whitespace it fails during the MkDocs serve/build process.
Path is: '/Users/username/Documents/1. Test
```
'page_markdown', page.markdown, page=page, config=config, files=files
File "/usr/local/lib/python3.7/site-packages/mkdocs/plugins.py", line 94, in run_event
result = method(item, **kwargs)
File "/Users/username/Library/Python/3.7/lib/python/site-packages/mkdocs_table_reader_plugin/plugin.py", line 94, in on_page_markdown
pd_args, pd_kwargs = parse_argkwarg(result)
File "/Users/username/Library/Python/3.7/lib/python/site-packages/mkdocs_table_reader_plugin/safe_eval.py", line 86, in parse_argkwarg
args.append(literal_eval(i))
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/ast.py", line 46, in literal_eval
node_or_string = parse(node_or_string, mode='eval')
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/ast.py", line 35, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "<unknown>", line 1
'/Users/username/Documents/1.
```
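A minimal sketch (hypothetical argument string) of what goes wrong: with the original pattern a bare space also acts as an argument separator, so a quoted path containing spaces is chopped mid-string and `literal_eval` receives an unterminated token; requiring a comma before the space keeps the path intact.
```python
import re

s = "'/Users/username/Documents/1. Test/table.csv', sep=','"

# Original pattern: the space inside the quoted path is treated as a separator.
print(re.split(r"(?<!\=)(?:,? )(?!\=)", s))
# ["'/Users/username/Documents/1.", "Test/table.csv'", "sep=','"]

# Fixed pattern: only ", " separates arguments, so the path survives intact.
print(re.split(r"(?<!\=)(?:,{1} )(?!\=)", s))
# ["'/Users/username/Documents/1. Test/table.csv'", "sep=','"]
```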
|
0.0
|
31f80de456e36005d4dcb5d610cb5ec2e89cabf7
|
[
"tests/test_safe_eval.py::test_parseargkwarg_6",
"tests/test_safe_eval.py::test_parseargkwarg_7"
] |
[
"tests/test_build.py::test_wrong_path",
"tests/test_safe_eval.py::test_safe_eval0",
"tests/test_safe_eval.py::test_safe_eval1",
"tests/test_safe_eval.py::test_safe_eval2",
"tests/test_safe_eval.py::test_safe_eval3",
"tests/test_safe_eval.py::test_safe_eval4",
"tests/test_safe_eval.py::test_safe_eval5",
"tests/test_safe_eval.py::test_parseargkwarg_1",
"tests/test_safe_eval.py::test_parseargkwarg_2",
"tests/test_safe_eval.py::test_parseargkwarg_3",
"tests/test_safe_eval.py::test_parseargkwarg_4",
"tests/test_safe_eval.py::test_parseargkwarg_5",
"tests/test_safe_eval.py::test_parseargkwarg_error"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-07 06:21:03+00:00
|
mit
| 5,917 |
|
tinosulzer__PyBaMM-162
|
diff --git a/docs/source/expression_tree/unary_operator.rst b/docs/source/expression_tree/unary_operator.rst
index 79ece869..13675c49 100644
--- a/docs/source/expression_tree/unary_operator.rst
+++ b/docs/source/expression_tree/unary_operator.rst
@@ -25,6 +25,11 @@ Unary Operators
.. autoclass:: pybamm.NumpyBroadcast
:members:
+.. autoclass:: pybamm.SurfaceValue
+ :members:
+
.. autofunction:: pybamm.grad
.. autofunction:: pybamm.div
+
+.. autofunction:: pybamm.surf
diff --git a/pybamm/__init__.py b/pybamm/__init__.py
index ac4b6da7..98c5b202 100644
--- a/pybamm/__init__.py
+++ b/pybamm/__init__.py
@@ -78,8 +78,10 @@ from .expression_tree.unary_operators import (
Divergence,
Broadcast,
NumpyBroadcast,
+ SurfaceValue,
grad,
div,
+ surf,
)
from .expression_tree.function_parameter import FunctionParameter
from .expression_tree.scalar import Scalar
diff --git a/pybamm/discretisations/discretisation.py b/pybamm/discretisations/discretisation.py
index a572b4fb..18cff23e 100644
--- a/pybamm/discretisations/discretisation.py
+++ b/pybamm/discretisations/discretisation.py
@@ -212,6 +212,13 @@ class Discretisation(object):
)
return symbol
+ elif isinstance(symbol, pybamm.SurfaceValue):
+ child = symbol.children[0]
+ discretised_child = self.process_symbol(child)
+ return self._spatial_methods[symbol.domain[0]].surface_value(
+ discretised_child
+ )
+
elif isinstance(symbol, pybamm.BinaryOperator):
return self.process_binary_operators(symbol)
diff --git a/pybamm/expression_tree/unary_operators.py b/pybamm/expression_tree/unary_operators.py
index 17ce41fd..abe2babb 100644
--- a/pybamm/expression_tree/unary_operators.py
+++ b/pybamm/expression_tree/unary_operators.py
@@ -210,6 +210,16 @@ class NumpyBroadcast(Broadcast):
return child_eval * self.broadcasting_vector
+class SurfaceValue(SpatialOperator):
+ """A node in the expression tree which gets the surface value of a variable.
+
+ **Extends:** :class:`SpatialOperator`
+ """
+
+ def __init__(self, child):
+ super().__init__("surf", child)
+
+
#
# Methods to call Gradient and Divergence
#
@@ -251,3 +261,27 @@ def div(expression):
"""
return Divergence(expression)
+
+
+#
+# Method to call SurfaceValue
+#
+
+
+def surf(variable):
+ """convenience function for creating a :class:`SurfaceValue`
+
+ Parameters
+ ----------
+
+ variable : :class:`Symbol`
+ the surface value of this variable will be returned
+
+ Returns
+ -------
+
+ :class:`GetSurfaceValue`
+ the surface value of ``variable``
+ """
+
+ return SurfaceValue(variable)
diff --git a/pybamm/spatial_methods/finite_volume.py b/pybamm/spatial_methods/finite_volume.py
index b172ad3d..86cc3f68 100644
--- a/pybamm/spatial_methods/finite_volume.py
+++ b/pybamm/spatial_methods/finite_volume.py
@@ -237,6 +237,33 @@ class FiniteVolume(pybamm.SpatialMethod):
left_ghost_cell, discretised_symbol, right_ghost_cell
)
+ def surface_value(self, discretised_symbol):
+ """
+ Uses linear extrapolation to get the surface value of a variable in the
+ Finite Volume Method.
+
+ Parameters
+ -----------
+ discretised_symbol : :class:`pybamm.StateVector`
+ The discretised variable (a state vector) from which to calculate
+ the surface value.
+
+ Returns
+ -------
+ :class:`pybamm.Variable`
+ The variable representing the surface value.
+ """
+ # Better to make class similar NodeToEdge and pass function?
+ # def surface_value(array):
+ # "Linear extrapolation for surface value"
+ # array[-1] + (array[-1] - array[-2]) / 2
+ # ... or make StateVector and add?
+ y_slice_stop = discretised_symbol.y_slice.stop
+ last_node = pybamm.StateVector(slice(y_slice_stop - 1, y_slice_stop))
+ penultimate_node = pybamm.StateVector(slice(y_slice_stop - 2, y_slice_stop - 1))
+ surface_value = (last_node + (last_node - penultimate_node) / 2)
+ return surface_value
+
#######################################################
# Can probably be moved outside of the spatial method
######################################################
diff --git a/pybamm/spatial_methods/spatial_method.py b/pybamm/spatial_methods/spatial_method.py
index 1006ed32..b92104b7 100644
--- a/pybamm/spatial_methods/spatial_method.py
+++ b/pybamm/spatial_methods/spatial_method.py
@@ -102,6 +102,24 @@ class SpatialMethod:
"""
raise NotImplementedError
+ def surface_value(self, discretised_symbol):
+ """
+ Returns the surface value using the approriate expression for the
+ spatial method.
+
+ Parameters
+ -----------
+ discretised_symbol : :class:`pybamm.StateVector`
+ The discretised variable (a state vector) from which to calculate
+ the surface value.
+
+ Returns
+ -------
+ :class:`pybamm.Variable`
+ The variable representing the surface value.
+ """
+ raise NotImplementedError
+
# We could possibly move the following outside of SpatialMethod
# depending on the requirements of the FiniteVolume
|
tinosulzer/PyBaMM
|
b6a3e58f579b1e2b87bb97854e3c902c189e0a7f
|
diff --git a/tests/test_spatial_methods/test_finite_volume.py b/tests/test_spatial_methods/test_finite_volume.py
index 7c7a2e71..ce84a225 100644
--- a/tests/test_spatial_methods/test_finite_volume.py
+++ b/tests/test_spatial_methods/test_finite_volume.py
@@ -32,6 +32,31 @@ class TestFiniteVolume(unittest.TestCase):
avd = pybamm.NodeToEdge(d, arithmetic_mean)
np.testing.assert_array_equal(avd.evaluate(None, y_test), np.ones(9))
+ def test_surface_value(self):
+ # create discretisation
+ defaults = shared.TestDefaults1DParticle(10)
+ spatial_methods = {"negative particle": pybamm.FiniteVolume}
+ disc = pybamm.Discretisation(defaults.mesh, spatial_methods)
+ mesh = disc.mesh
+
+ combined_submesh = mesh.combine_submeshes("negative particle")
+
+ # create variable
+ var = pybamm.Variable("var", domain="negative particle")
+ surf_eqn = pybamm.surf(var)
+ disc._variables = [var]
+ disc.set_variable_slices()
+ surf_eqn_disc = disc.process_symbol(surf_eqn)
+
+ # check constant extrapolates to constant
+ constant_y = np.ones_like(combined_submesh.nodes)
+ self.assertEqual(surf_eqn_disc.evaluate(None, constant_y), 1)
+
+ # check linear variable extrapolates correctly
+ linear_y = combined_submesh.nodes
+ y_surf = combined_submesh.nodes[-1] + combined_submesh.d_nodes[-1] / 2
+ self.assertEqual(surf_eqn_disc.evaluate(None, linear_y), y_surf)
+
def test_discretise_diffusivity_times_spatial_operator(self):
# Set up
whole_cell = ["negative electrode", "separator", "positive electrode"]
|
Get surface values
**Summary**
Implement a class which takes a variable and returns the value at the surface.
**Motivation**
This is required for implementing the SPM (#72), as the concentrations on the surface of the particles are needed in the Butler-Volmer expression and the OCPs.
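A minimal numerical sketch of the extrapolation the finite-volume implementation uses: with cell-centred nodes the surface lies half a cell beyond the last node, so `last_node + (last_node - penultimate_node) / 2` recovers the boundary value exactly for linear data.
```python
import numpy as np

nodes = np.linspace(0.05, 0.95, 10)  # uniform cell centres on [0, 1]
c = nodes.copy()                     # a linear concentration profile
c_surf = c[-1] + (c[-1] - c[-2]) / 2
print(c_surf)  # 1.0 -- the exact value at the particle surface
```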
|
0.0
|
b6a3e58f579b1e2b87bb97854e3c902c189e0a7f
|
[
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_surface_value"
] |
[
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_add_ghost_nodes",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_discretise_diffusivity_times_spatial_operator",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_div_convergence",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_div_convergence_internal",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_grad_convergence_with_bcs",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_grad_convergence_without_bcs",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_grad_div_shapes_Dirichlet_bcs",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_grad_div_shapes_Neumann_bcs",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_grad_div_shapes_mixed_domain",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_node_to_edge",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_spherical_grad_div_shapes_Dirichlet_bcs",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_spherical_grad_div_shapes_Neumann_bcs",
"tests/test_spatial_methods/test_finite_volume.py::TestFiniteVolume::test_spherical_operators"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-20 16:52:37+00:00
|
bsd-3-clause
| 5,918 |
|
tinosulzer__PyBaMM-170
|
diff --git a/input/parameters/lithium-ion/parameters/LCO.csv b/input/parameters/lithium-ion/parameters/LCO.csv
index 141cb2ed..9d8b59c2 100644
--- a/input/parameters/lithium-ion/parameters/LCO.csv
+++ b/input/parameters/lithium-ion/parameters/LCO.csv
@@ -51,5 +51,5 @@ alpha,0.5,,Charge transfer coefficient,Scott Moura,
,,,,,
# Initial Conditions,,,,,
ce0,1000,mol.m-3, Initial lithium ion concentration in electrolyte,,
-cn0,1000,mol.m-3, Initial lithium concentration in positive electrode,,
-cp0,1000,mol.m-3, Initial lithium concentration in negative electrode,,
+cn0,1998.6,mol.m-3, Initial lithium concentration in positive electrode,,
+cp0,3073.1,mol.m-3, Initial lithium concentration in negative electrode,,
diff --git a/pybamm/models/li_ion/single_particle_model.py b/pybamm/models/li_ion/single_particle_model.py
index 445b2cc9..fcf2e535 100644
--- a/pybamm/models/li_ion/single_particle_model.py
+++ b/pybamm/models/li_ion/single_particle_model.py
@@ -79,7 +79,7 @@ class SPM(pybamm.BaseModel):
Nn = - gamma_n * D_n(cn) * pybamm.grad(cn)
dcndt = - pybamm.div(Nn)
Np = - gamma_p * D_p(cp) * pybamm.grad(cp)
- dcpdt = -pybamm.div(Np)
+ dcpdt = - pybamm.div(Np)
self.rhs = {cn: dcndt, cp: dcpdt}
# Boundary conditions
@@ -88,7 +88,7 @@ class SPM(pybamm.BaseModel):
Nn: {"left": pybamm.Scalar(0),
"right": pybamm.Scalar(1) / ln / beta_n},
Np: {"left": pybamm.Scalar(0),
- "right": pybamm.Scalar(1) / lp / beta_p / C_hat_p},
+ "right": - pybamm.Scalar(1) / lp / beta_p / C_hat_p},
}
# Initial conditions
|
tinosulzer/PyBaMM
|
9469e1606bb76c7e184da03884ab7abb6c78b402
|
diff --git a/tests/test_models/test_li_ion.py b/tests/test_models/test_li_ion.py
index d14c23c8..e0a1e59c 100644
--- a/tests/test_models/test_li_ion.py
+++ b/tests/test_models/test_li_ion.py
@@ -17,23 +17,24 @@ class TestLiIonSPM(unittest.TestCase):
modeltest.test_all()
+ def test_surface_concentrartion(self):
+ model = pybamm.li_ion.SPM()
+ params = model.default_parameter_values
+ params.process_model(model)
+ disc = model.default_discretisation
+ disc.process_model(model)
+ t_eval = np.linspace(0, 1, 100)
+ solver = model.default_solver
+ solver.solve(model, t_eval)
+ T, Y = solver.t, solver.y
-def test_surface_concentrartion(self):
- model = pybamm.li_ion.SPM()
- disc = model.default_discretisation
- disc.process_model(model)
- t_eval = np.linspace(0, 1, 100)
- solver = model.default_solver
- solver.solve(model, t_eval)
- T, Y = solver.t, solver.y
-
- # check surface concentration decreases in negative particle and
- # increases in positive particle for discharge
- np.testing.assert_array_less(
- model.variables["cn_surf"].evaluate(T, Y)[:, 1:],
- model.variables["cn_surf"].evaluate(T, Y)[:, :-1],
- )
- np.testing.assert_array_less(
- model.variables["cp_surf"].evaluate(T, Y)[:, :-1],
- model.variables["cp_surf"].evaluate(T, Y)[:, 1:],
- )
+ # check surface concentration decreases in negative particle and
+ # increases in positive particle for discharge
+ np.testing.assert_array_less(
+ model.variables["cn_surf"].evaluate(T, Y)[:, 1:],
+ model.variables["cn_surf"].evaluate(T, Y)[:, :-1],
+ )
+ np.testing.assert_array_less(
+ model.variables["cp_surf"].evaluate(T, Y)[:, :-1],
+ model.variables["cp_surf"].evaluate(T, Y)[:, 1:],
+ )
|
Test in SPM failing
One of the [tests](https://github.com/tinosulzer/PyBaMM/blob/61fa60306919edab1bcb6e7c162329217da3311b/tests/test_models/test_li_ion.py#L21) for the SPM isn't inside the testing class, so it isn't being called; once put inside the class it fails (`cp_surf` not increasing with time). Both `cn_surf` and `cp_surf` quickly go negative.
My guess is this is due to wrong parameter values, but it could be something else. Could you have a look @rtimms?
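A minimal sketch (synthetic arrays, not model output) of the monotonicity check the relocated test performs: during discharge the negative-particle surface concentration should fall and the positive-particle one should rise.
```python
import numpy as np

cn_surf = np.linspace(1.0, 0.5, 100)  # stand-in for model output over time
cp_surf = np.linspace(0.5, 1.0, 100)
np.testing.assert_array_less(cn_surf[1:], cn_surf[:-1])  # decreasing
np.testing.assert_array_less(cp_surf[:-1], cp_surf[1:])  # increasing
```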
|
0.0
|
9469e1606bb76c7e184da03884ab7abb6c78b402
|
[
"tests/test_models/test_li_ion.py::TestLiIonSPM::test_surface_concentrartion"
] |
[
"tests/test_models/test_li_ion.py::TestLiIonSPM::test_basic_processing"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-22 09:52:20+00:00
|
bsd-3-clause
| 5,919 |
|
tinosulzer__PyBaMM-33
|
diff --git a/pybamm/expression_tree/domain.py b/pybamm/expression_tree/domain.py
index 5ca645b2..75be730f 100644
--- a/pybamm/expression_tree/domain.py
+++ b/pybamm/expression_tree/domain.py
@@ -13,17 +13,19 @@ class Domain(object):
name: str
the name of the node
- domain : iterable of str
+ domain : iterable of str, or str
the list of domains
"""
def __init__(self, name, domain=[]):
super().__init__(name)
+ if isinstance(domain, str):
+ domain = [domain]
try:
iter(domain)
except TypeError:
- raise TypeError('Domain: argument domain is not iterable')
+ raise TypeError("Domain: argument domain is not iterable")
else:
self.domain = domain
|
tinosulzer/PyBaMM
|
a5bee2a9f3f1c231c7cc71d148f468392ada27f8
|
diff --git a/tests/test_expression_tree/test_independent_variable.py b/tests/test_expression_tree/test_independent_variable.py
index 7ce7b560..22db0db9 100644
--- a/tests/test_expression_tree/test_independent_variable.py
+++ b/tests/test_expression_tree/test_independent_variable.py
@@ -13,9 +13,12 @@ class TestIndependentVariable(unittest.TestCase):
a = pybamm.IndependentVariable("a")
self.assertEqual(a.name, "a")
self.assertEqual(a.domain, [])
- a = pybamm.IndependentVariable("a", domain=['test'])
- self.assertEqual(a.domain[0], 'test')
- self.assertRaises(TypeError, pybamm.IndependentVariable("a", domain='test'))
+ a = pybamm.IndependentVariable("a", domain=["test"])
+ self.assertEqual(a.domain[0], "test")
+ a = pybamm.IndependentVariable("a", domain="test")
+ self.assertEqual(a.domain[0], "test")
+ with self.assertRaises(TypeError):
+ pybamm.IndependentVariable("a", domain=1)
if __name__ == "__main__":
|
Domains as strings
**Describe the bug**
The `Domain` class checks that `domain` is an iterable and raises a `TypeError` if it isn't.
However, a `str` domain is not caught by this check, since strings are themselves iterable (`iter("a string")` succeeds rather than raising).
This isn't caught by the test in `TestIndependentVariable`; the test should be
```python
with self.assertRaises(TypeError):
pybamm.IndependentVariable("a", domain="test")
```
**Suggested fix**
If `domain` is a string, wrap it in a list (`[domain]`); note that `list(domain)` would split the string into individual characters.
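A minimal sketch of the pitfall and of the wrapping the patch actually uses:
```python
domain = "test"
iter(domain)         # succeeds -- strings are iterable, so no TypeError
print(list(domain))  # ['t', 'e', 's', 't']  -- splits into characters
print([domain])      # ['test']              -- the wrapping the fix applies
```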
|
0.0
|
a5bee2a9f3f1c231c7cc71d148f468392ada27f8
|
[
"tests/test_expression_tree/test_independent_variable.py::TestIndependentVariable::test_variable_init"
] |
[] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-12-19 17:33:41+00:00
|
bsd-3-clause
| 5,920 |
|
titipata__pubmed_parser-89
|
diff --git a/pubmed_parser/pubmed_oa_parser.py b/pubmed_parser/pubmed_oa_parser.py
index 06b4d25..d05dda9 100644
--- a/pubmed_parser/pubmed_oa_parser.py
+++ b/pubmed_parser/pubmed_oa_parser.py
@@ -145,7 +145,7 @@ def parse_pubmed_xml(path, include_path=False, nxml=False):
pub_day_node = tree.find(".//pub-date/day")
pub_day = pub_day_node.text if pub_day_node is not None else "01"
- subjects_node = tree.findall(".//article-categories.//subj-group/subject")
+ subjects_node = tree.findall(".//article-categories//subj-group/subject")
subjects = list()
if subjects_node is not None:
for s in subjects_node:
|
titipata/pubmed_parser
|
1376aa651f05662742e7e225c831c6ffda0dc91b
|
diff --git a/tests/test_pubmed_oa_parser.py b/tests/test_pubmed_oa_parser.py
index 768c45c..4d44d3f 100644
--- a/tests/test_pubmed_oa_parser.py
+++ b/tests/test_pubmed_oa_parser.py
@@ -14,6 +14,7 @@ def test_parse_pubmed_xml():
assert len(parsed_xml.get("full_title")) > 0
assert parsed_xml.get("pmc") == "3460867"
assert parsed_xml.get("doi") == "10.1371/journal.pone.0046493"
+ assert parsed_xml.get("subjects") == "Research Article; Biology; Biochemistry; Enzymes; Enzyme Metabolism; Lipids; Fatty Acids; Glycerides; Lipid Metabolism; Neutral Lipids; Metabolism; Lipid Metabolism; Proteins; Globular Proteins; Protein Classes; Recombinant Proteins; Biotechnology; Microbiology; Bacterial Pathogens; Bacteriology; Emerging Infectious Diseases; Host-Pathogen Interaction; Microbial Growth and Development; Microbial Metabolism; Microbial Pathogens; Microbial Physiology; Proteomics; Sequence Analysis; Spectrometric Identification of Proteins"
def test_parse_pubmed_paragraph():
|
parse_pubmed_xml does not return "subjects"
**Describe the bug**
Parsing PubMed OA XML appears to have a bug with the "subjects" attribute.
**To Reproduce**
```
dict_out = pp.parse_pubmed_xml(path)
dict_out["subjects"]
```
This will always be empty.
https://github.com/titipata/pubmed_parser/blob/1376aa651f05662742e7e225c831c6ffda0dc91b/pubmed_parser/pubmed_oa_parser.py#L148
I believe the following change will fix the issue.
```
subjects_node = tree.findall(".//article-categories//subj-group/subject")
```
**Expected behavior**
Should return subjects.
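A minimal sketch (made-up XML) of the corrected expression: `//` descends through nested `subj-group` elements, whereas the stray `.` in the old path became part of the tag name, so the lookup silently matched nothing.
```python
import xml.etree.ElementTree as ET

xml = """<article><article-categories>
  <subj-group><subject>Research Article</subject>
    <subj-group><subject>Biology</subject></subj-group>
  </subj-group>
</article-categories></article>"""
tree = ET.fromstring(xml)
subjects = [s.text for s in tree.findall(".//article-categories//subj-group/subject")]
print(subjects)  # ['Research Article', 'Biology']
```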
|
0.0
|
1376aa651f05662742e7e225c831c6ffda0dc91b
|
[
"tests/test_pubmed_oa_parser.py::test_parse_pubmed_xml"
] |
[
"tests/test_pubmed_oa_parser.py::test_parse_pubmed_paragraph",
"tests/test_pubmed_oa_parser.py::test_parse_pubmed_references",
"tests/test_pubmed_oa_parser.py::test_parse_pubmed_caption"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-04-21 08:50:20+00:00
|
mit
| 5,921 |
|
tlsfuzzer__python-ecdsa-281
|
diff --git a/src/ecdsa/keys.py b/src/ecdsa/keys.py
index 4a673f6..105d0b2 100644
--- a/src/ecdsa/keys.py
+++ b/src/ecdsa/keys.py
@@ -1124,10 +1124,16 @@ class SigningKey(object):
"Non NULL parameters for a EdDSA key"
)
key_str_der, s = der.remove_octet_string(s)
- if s:
- raise der.UnexpectedDER(
- "trailing junk inside the privateKey"
- )
+
+ # As RFC5958 describe, there are may be optional Attributes
+ # and Publickey. Don't raise error if something after
+ # Privatekey
+
+ # TODO parse attributes or validate publickey
+ # if s:
+ # raise der.UnexpectedDER(
+ # "trailing junk inside the privateKey"
+ # )
key_str, s = der.remove_octet_string(key_str_der)
if s:
raise der.UnexpectedDER(
|
tlsfuzzer/python-ecdsa
|
4de8d5bf89089d1140eb99aa5d7eb2dc8e6337b6
|
diff --git a/src/ecdsa/test_keys.py b/src/ecdsa/test_keys.py
index 564f312..0295881 100644
--- a/src/ecdsa/test_keys.py
+++ b/src/ecdsa/test_keys.py
@@ -927,3 +927,13 @@ def test_VerifyingKey_inequality_with_different_secret_points():
sk2 = SigningKey.from_secret_exponent(3, BRAINPOOLP160r1)
assert sk1.verifying_key != sk2.verifying_key
+
+
+def test_SigningKey_from_pem_pkcs8v2_EdDSA():
+ pem = """-----BEGIN PRIVATE KEY-----
+ MFMCAQEwBQYDK2VwBCIEICc2F2ag1n1QP0jY+g9qWx5sDkx0s/HdNi3cSRHw+zsI
+ oSMDIQA+HQ2xCif8a/LMWR2m5HaCm5I2pKe/cc8OiRANMHxjKQ==
+ -----END PRIVATE KEY-----"""
+
+ sk = SigningKey.from_pem(pem)
+ assert sk.curve == Ed25519
|
SigningKey.from_pem may not support PKCS#8 v2
I'm using ecdsa 0.18.0b1 and trying to use it to load a private key from a PEM file.
When I use `SigningKey.from_pem(pem.encode(), password=None)` to load the PEM file, with the pem data in PKCS#8 v2 format (RFC 5958):
```
-----BEGIN PRIVATE KEY-----
MFMCAQEwBQYDK2VwBCIEIGQqNAZlORmn1k4QrYz1FvO4fOQowS3GXQMqRKDzmx9P
oSMDIQCrO5iGM5hnLWrHavywoXekAoXPpYRuB0Dr6DjZF6FZkg==
-----END PRIVATE KEY-----
```
I got an error:
```
File "/usr/local/lib/python3.9/site-packages/ecdsa/keys.py", line 1020, in from_pem
return cls.from_der(
File "/usr/local/lib/python3.9/site-packages/ecdsa/keys.py", line 1119, in from_der
raise der.UnexpectedDER(
ecdsa.der.UnexpectedDER: trailing junk inside the privateKey
```
Actually, I'm trying to port an application initially written in Rust to Python, so I need to parse PEM files generated by the Rust version. I used [Ed25519KeyPair::generate_pkcs8](https://docs.rs/ring/0.16.20/ring/signature/struct.Ed25519KeyPair.html#method.generate_pkcs8) to generate keys in PKCS#8 v2 format.
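For reference, a minimal sketch of the failing call using the throwaway key from this report; with the relaxed parser it loads cleanly instead of raising `UnexpectedDER`:
```python
from ecdsa import SigningKey
from ecdsa.curves import Ed25519

pem = """-----BEGIN PRIVATE KEY-----
MFMCAQEwBQYDK2VwBCIEIGQqNAZlORmn1k4QrYz1FvO4fOQowS3GXQMqRKDzmx9P
oSMDIQCrO5iGM5hnLWrHavywoXekAoXPpYRuB0Dr6DjZF6FZkg==
-----END PRIVATE KEY-----"""
sk = SigningKey.from_pem(pem)
assert sk.curve == Ed25519
```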
|
0.0
|
4de8d5bf89089d1140eb99aa5d7eb2dc8e6337b6
|
[
"src/ecdsa/test_keys.py::test_SigningKey_from_pem_pkcs8v2_EdDSA"
] |
[
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_array_array_of_bytes",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_array_array_of_bytes_memoryview",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_array_array_of_ints",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_array_array_of_ints_memoryview",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_bytearray",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_bytearray_compressed",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_bytearray_uncompressed",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_bytes",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_bytes_compressed",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_bytes_memoryview",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_bytes_uncompressed",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromString::test_bytesarray_memoryview",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_SigningKey_inequality_on_same_curve",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_VerifyingKey_inequality_on_same_curve",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_array_array_of_bytes",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_array_array_of_bytes_memoryview",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_bytearray",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_bytes",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_bytes_memoryview",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_bytesarray_memoryview",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_custom_hashfunc",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_ed25519_VerifyingKey_repr__",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_ed25519_export_import",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_ed25519_sig_verify",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_ed448_export_import",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_ed448_from_pem",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_ed448_sig_verify",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_ed448_to_pem",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_edwards_from_public_key_recovery",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_edwards_from_public_key_recovery_with_digest",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_edwards_from_public_point",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_edwards_precompute_no_side_effect",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_equality_on_verifying_keys",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_export_ed255_to_pem",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_from_pem_with_custom_hashfunc",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_from_public_point_old",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_inequality_on_verifying_keys",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_inequality_on_verifying_keys_not_implemented",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_inequality_on_wrong_types",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_load_ed25519_from_pem",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_load_key_with_disabled_format",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_load_key_with_explicit_parameters",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_load_key_with_explicit_with_explicit_disabled",
"src/ecdsa/test_keys.py::TestVerifyingKeyFromDer::test_parse_malfomed_eddsa_der_pubkey",
"src/ecdsa/test_keys.py::TestSigningKey::test_compare_verifying_key_with_precompute",
"src/ecdsa/test_keys.py::TestSigningKey::test_decoding_explicit_curve_parameters",
"src/ecdsa/test_keys.py::TestSigningKey::test_decoding_explicit_curve_parameters_with_explicit_disabled",
"src/ecdsa/test_keys.py::TestSigningKey::test_ed25519_from_pem",
"src/ecdsa/test_keys.py::TestSigningKey::test_ed25519_to_and_from_pem",
"src/ecdsa/test_keys.py::TestSigningKey::test_ed25519_to_pem",
"src/ecdsa/test_keys.py::TestSigningKey::test_ed448_encode_decode",
"src/ecdsa/test_keys.py::TestSigningKey::test_ed448_from_pem",
"src/ecdsa/test_keys.py::TestSigningKey::test_ed448_to_pem",
"src/ecdsa/test_keys.py::TestSigningKey::test_equality_on_signing_keys",
"src/ecdsa/test_keys.py::TestSigningKey::test_inequality_on_signing_keys",
"src/ecdsa/test_keys.py::TestSigningKey::test_inequality_on_signing_keys_not_implemented",
"src/ecdsa/test_keys.py::TestSigningKey::test_verify_with_lazy_precompute",
"src/ecdsa/test_keys.py::TestSigningKey::test_verify_with_precompute",
"src/ecdsa/test_keys.py::TestTrivialCurve::test_deterministic_sign",
"src/ecdsa/test_keys.py::TestTrivialCurve::test_deterministic_sign_random_message",
"src/ecdsa/test_keys.py::TestTrivialCurve::test_deterministic_sign_that_rises_R_zero_error",
"src/ecdsa/test_keys.py::TestTrivialCurve::test_deterministic_sign_that_rises_S_zero_error",
"src/ecdsa/test_keys.py::TestTrivialCurve::test_generator_sanity",
"src/ecdsa/test_keys.py::TestTrivialCurve::test_public_key_sanity",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-bytes-raw]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-bytes-raw]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-bytes-der]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-bytes-der]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-bytes-strings]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-bytes-strings]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-bytes",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-bytes",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-bytearray-raw]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-bytearray-raw]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-bytearray-der]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-bytearray-der]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-bytearray-strings]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-bytearray-strings]",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-bytearray",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-bytearray",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify-array.array",
"src/ecdsa/test_keys.py::test_VerifyingKey_verify[verify_digest-array.array",
"src/ecdsa/test_keys.py::test_SigningKey_from_string[bytes]",
"src/ecdsa/test_keys.py::test_SigningKey_from_string[bytes",
"src/ecdsa/test_keys.py::test_SigningKey_from_string[bytearray]",
"src/ecdsa/test_keys.py::test_SigningKey_from_string[bytearray",
"src/ecdsa/test_keys.py::test_SigningKey_from_string[array.array",
"src/ecdsa/test_keys.py::test_SigningKey_from_der[bytes]",
"src/ecdsa/test_keys.py::test_SigningKey_from_der[bytes",
"src/ecdsa/test_keys.py::test_SigningKey_from_der[bytearray]",
"src/ecdsa/test_keys.py::test_SigningKey_from_der[bytearray",
"src/ecdsa/test_keys.py::test_SigningKey_from_der[array.array",
"src/ecdsa/test_keys.py::test_SigningKey_sign_deterministic[bytes]",
"src/ecdsa/test_keys.py::test_SigningKey_sign_deterministic[bytes",
"src/ecdsa/test_keys.py::test_SigningKey_sign_deterministic[bytearray]",
"src/ecdsa/test_keys.py::test_SigningKey_sign_deterministic[bytearray",
"src/ecdsa/test_keys.py::test_SigningKey_sign_deterministic[array.array",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest_deterministic[bytes]",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest_deterministic[bytes",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest_deterministic[bytearray]",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest_deterministic[bytearray",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest_deterministic[array.array",
"src/ecdsa/test_keys.py::test_SigningKey_sign[bytes]",
"src/ecdsa/test_keys.py::test_SigningKey_sign[bytes",
"src/ecdsa/test_keys.py::test_SigningKey_sign[bytearray]",
"src/ecdsa/test_keys.py::test_SigningKey_sign[bytearray",
"src/ecdsa/test_keys.py::test_SigningKey_sign[array.array",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest[bytes]",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest[bytes",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest[bytearray]",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest[bytearray",
"src/ecdsa/test_keys.py::test_SigningKey_sign_digest[array.array",
"src/ecdsa/test_keys.py::test_SigningKey_with_unlikely_value",
"src/ecdsa/test_keys.py::test_SigningKey_with_custom_curve_old_point",
"src/ecdsa/test_keys.py::test_VerifyingKey_inequality_with_different_curves",
"src/ecdsa/test_keys.py::test_VerifyingKey_inequality_with_different_secret_points"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-27 07:22:35+00:00
|
mit
| 5,922 |
|
tmbo__questionary-244
|
diff --git a/examples/advanced_workflow.py b/examples/advanced_workflow.py
index 6f820f8..5b06633 100644
--- a/examples/advanced_workflow.py
+++ b/examples/advanced_workflow.py
@@ -6,6 +6,14 @@ from questionary import prompt
def ask_dictstyle(**kwargs):
questions = [
+ {
+ # just print a message, don't ask a question
+ # does not require a name (but if provided, is ignored) and does not return a value
+ "type": "print",
+ "name": "intro",
+ "message": "This example demonstrates advanced features! 🦄",
+ "style": "bold italic",
+ },
{
"type": "confirm",
"name": "conditional_step",
@@ -27,6 +35,14 @@ def ask_dictstyle(**kwargs):
"message": "Select item",
"choices": ["item1", "item2", Separator(), "other"],
},
+ {
+ # just print a message, don't ask a question
+ # does not require a name and does not return a value
+ "type": "print",
+ "message": "Please enter a value for 'other'",
+ "style": "bold italic fg:darkred",
+ "when": lambda x: x["second_question"] == "other",
+ },
{
"type": "text",
# intentionally overwrites result from previous question
@@ -35,7 +51,7 @@ def ask_dictstyle(**kwargs):
"when": lambda x: x["second_question"] == "other",
},
]
- return prompt(questions)
+ return prompt(questions, **kwargs)
if __name__ == "__main__":
diff --git a/questionary/prompt.py b/questionary/prompt.py
index 0180f0d..5cabcee 100644
--- a/questionary/prompt.py
+++ b/questionary/prompt.py
@@ -11,6 +11,7 @@ from questionary import utils
from questionary.constants import DEFAULT_KBI_MESSAGE
from questionary.prompts import AVAILABLE_PROMPTS
from questionary.prompts import prompt_by_name
+from questionary.prompts.common import print_formatted_text
class PromptParameterException(ValueError):
@@ -143,7 +144,8 @@ def unsafe_prompt(
# import the question
if "type" not in question_config:
raise PromptParameterException("type")
- if "name" not in question_config:
+ # every type except 'print' needs a name
+ if "name" not in question_config and question_config["type"] != "print":
raise PromptParameterException("name")
_kwargs = kwargs.copy()
@@ -151,7 +153,7 @@ def unsafe_prompt(
_type = _kwargs.pop("type")
_filter = _kwargs.pop("filter", None)
- name = _kwargs.pop("name")
+ name = _kwargs.pop("name", None) if _type == "print" else _kwargs.pop("name")
when = _kwargs.pop("when", None)
if true_color:
@@ -172,6 +174,22 @@ def unsafe_prompt(
"'when' needs to be function that accepts a dict argument"
)
+ # handle 'print' type
+ if _type == "print":
+ try:
+ message = _kwargs.pop("message")
+ except KeyError as e:
+ raise PromptParameterException("message") from e
+
+ # questions can take 'input' arg but print_formatted_text does not
+ # Remove 'input', if present, to avoid breaking during tests
+ _kwargs.pop("input", None)
+
+ print_formatted_text(message, **_kwargs)
+ if name:
+ answers[name] = None
+ continue
+
choices = question_config.get("choices")
if choices is not None and callable(choices):
calculated_choices = choices(answers)
|
tmbo/questionary
|
6643fe006f66802d9e504640f26b9e1a0a9d1253
|
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 8fca7fc..274faa6 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -101,3 +101,28 @@ def test_autocomplete_example():
assert result_dict == {"ants": "Polyergus lucidus"}
assert result_py == "Polyergus lucidus"
+
+
+def test_advanced_workflow_example():
+ from examples.advanced_workflow import ask_dictstyle
+
+ text = (
+ KeyInputs.ENTER
+ + "questionary"
+ + KeyInputs.ENTER
+ + KeyInputs.DOWN
+ + KeyInputs.DOWN
+ + KeyInputs.ENTER
+ + "Hello World"
+ + KeyInputs.ENTER
+ + "\r"
+ )
+
+ result_dict = ask_with_patched_input(ask_dictstyle, text)
+
+ assert result_dict == {
+ "intro": None,
+ "conditional_step": True,
+ "next_question": "questionary",
+ "second_question": "Hello World",
+ }
diff --git a/tests/test_prompt.py b/tests/test_prompt.py
index 186bae8..9a9a84b 100644
--- a/tests/test_prompt.py
+++ b/tests/test_prompt.py
@@ -2,6 +2,7 @@ import pytest
from questionary.prompt import PromptParameterException
from questionary.prompt import prompt
+from tests.utils import patched_prompt
def test_missing_message():
@@ -47,3 +48,31 @@ def test_invalid_question_type():
}
]
)
+
+
+def test_missing_print_message():
+ """Test 'print' raises exception if missing 'message'"""
+ with pytest.raises(PromptParameterException):
+ prompt(
+ [
+ {
+ "name": "test",
+ "type": "print",
+ }
+ ]
+ )
+
+
+def test_print_no_name():
+ """'print' type doesn't require a name so it
+ should not throw PromptParameterException"""
+ questions = [{"type": "print", "message": "Hello World"}]
+ result = patched_prompt(questions, "")
+ assert result == {}
+
+
+def test_print_with_name():
+ """'print' type should return {name: None} when name is provided"""
+ questions = [{"name": "hello", "type": "print", "message": "Hello World"}]
+ result = patched_prompt(questions, "")
+ assert result == {"hello": None}
|
Add print question type to dictionary style prompt
### Describe the problem
With version `1.10.0`, when I want to use both the dictionary-style prompt and the `print` question type, I need to call several prompts and merge the answers:
```python
from pprint import pprint
from questionary import Separator, prompt
from questionary import print as qprint
def ask_dictstyle(**kwargs):
questions = [
{
"type": "text",
"name": "first",
"message": "Your first message:",
},
{
"type": "text",
"name": "second",
"message": "Your second message",
},
]
answers = prompt(questions)
qprint("Oh! I need to give somme fancy explanations about what’s next!🦄", style="bold italic fg:darkred")
questions = [
{
"type": "select",
"name": "third",
"message": "Select item",
"choices": ["item1", "item2", Separator(), "other"],
},
]
answers.update(prompt(questions))
return answers
if __name__ == "__main__":
pprint(ask_dictstyle())
```
### Describe the solution
Adding a `print` type to the prompt dictionary would resolve the issue:
```python
from pprint import pprint
from questionary import Separator, prompt
def ask_dictstyle(**kwargs):
questions = [
{
"type": "text",
"name": "first",
"message": "Your first message:",
},
{
"type": "text",
"name": "second",
"message": "Your second message",
},
{
"type": "print",
"name": "help", # Do I need a name?
"message": "Oh! I need to give somme fancy explanations about what’s next!🦄",
"style": "bold italic fg:darkred",
},
{
"type": "select",
"name": "third",
"message": "Select item",
"choices": ["item1", "item2", Separator(), "other"],
},
]
return prompt(questions)
if __name__ == "__main__":
pprint(ask_dictstyle())
```
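A minimal sketch of the proposed behaviour (matching the patch above): a `print` step needs no `name`, accepts a `style`, and contributes nothing to the answers unless a name is given, in which case it maps to `None`.
```python
from questionary import prompt

questions = [
    {"type": "print", "message": "Welcome! 🦄", "style": "bold italic"},
    {"type": "text", "name": "first", "message": "Your first message:"},
]
answers = prompt(questions)
print(answers)  # e.g. {'first': '...'}  -- the print step adds no key
```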
### Alternatives considered
_No response_
|
0.0
|
6643fe006f66802d9e504640f26b9e1a0a9d1253
|
[
"tests/test_examples.py::test_advanced_workflow_example",
"tests/test_prompt.py::test_missing_print_message",
"tests/test_prompt.py::test_print_no_name",
"tests/test_prompt.py::test_print_with_name"
] |
[
"tests/test_examples.py::test_confirm_example",
"tests/test_examples.py::test_text_example",
"tests/test_examples.py::test_select_example",
"tests/test_examples.py::test_rawselect_example",
"tests/test_examples.py::test_checkbox_example",
"tests/test_examples.py::test_password_example",
"tests/test_examples.py::test_autocomplete_example",
"tests/test_prompt.py::test_missing_message",
"tests/test_prompt.py::test_missing_type",
"tests/test_prompt.py::test_missing_name",
"tests/test_prompt.py::test_invalid_question_type"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-08 01:38:10+00:00
|
mit
| 5,923 |
|
tmbo__questionary-330
|
diff --git a/questionary/prompts/checkbox.py b/questionary/prompts/checkbox.py
index 9523465..25161e4 100644
--- a/questionary/prompts/checkbox.py
+++ b/questionary/prompts/checkbox.py
@@ -38,6 +38,7 @@ def checkbox(
use_jk_keys: bool = True,
use_emacs_keys: bool = True,
instruction: Optional[str] = None,
+ show_description: bool = True,
**kwargs: Any,
) -> Question:
"""Ask the user to select from a list of items.
@@ -106,6 +107,8 @@ def checkbox(
`Ctrl+N` (down) and `Ctrl+P` (up) keys.
instruction: A message describing how to navigate the menu.
+ show_description: Display description of current selection if available.
+
Returns:
:class:`Question`: Question instance, ready to be prompted (using ``.ask()``).
"""
@@ -130,7 +133,11 @@ def checkbox(
raise ValueError("validate must be callable")
ic = InquirerControl(
- choices, default, pointer=pointer, initial_choice=initial_choice
+ choices,
+ default,
+ pointer=pointer,
+ initial_choice=initial_choice,
+ show_description=show_description,
)
def get_prompt_tokens() -> List[Tuple[str, str]]:
diff --git a/questionary/prompts/common.py b/questionary/prompts/common.py
index e682f48..827629d 100644
--- a/questionary/prompts/common.py
+++ b/questionary/prompts/common.py
@@ -53,6 +53,8 @@ class Choice:
checked: Preselect this choice when displaying the options.
shortcut_key: Key shortcut used to select this item.
+
+ description: Optional description of the item that can be displayed.
"""
title: FormattedText
@@ -70,6 +72,9 @@ class Choice:
shortcut_key: Optional[str]
"""A shortcut key for the choice"""
+ description: Optional[str]
+ """Choice description"""
+
def __init__(
self,
title: FormattedText,
@@ -77,10 +82,12 @@ class Choice:
disabled: Optional[str] = None,
checked: Optional[bool] = False,
shortcut_key: Optional[Union[str, bool]] = True,
+ description: Optional[str] = None,
) -> None:
self.disabled = disabled
self.title = title
self.checked = checked if checked is not None else False
+ self.description = description
if value is not None:
self.value = value
@@ -124,6 +131,7 @@ class Choice:
c.get("disabled", None),
c.get("checked"),
c.get("key"),
+ c.get("description", None),
)
def get_shortcut_title(self):
@@ -202,6 +210,7 @@ class InquirerControl(FormattedTextControl):
pointer: Optional[str]
pointed_at: int
is_answered: bool
+ show_description: bool
def __init__(
self,
@@ -211,6 +220,7 @@ class InquirerControl(FormattedTextControl):
use_indicator: bool = True,
use_shortcuts: bool = False,
show_selected: bool = False,
+ show_description: bool = True,
use_arrow_keys: bool = True,
initial_choice: Optional[Union[str, Choice, Dict[str, Any]]] = None,
**kwargs: Any,
@@ -218,6 +228,7 @@ class InquirerControl(FormattedTextControl):
self.use_indicator = use_indicator
self.use_shortcuts = use_shortcuts
self.show_selected = show_selected
+ self.show_description = show_description
self.use_arrow_keys = use_arrow_keys
self.default = default
self.pointer = pointer
@@ -417,9 +428,9 @@ class InquirerControl(FormattedTextControl):
for i, c in enumerate(self.choices):
append(i, c)
- if self.show_selected:
- current = self.get_pointed_at()
+ current = self.get_pointed_at()
+ if self.show_selected:
answer = current.get_shortcut_title() if self.use_shortcuts else ""
answer += (
@@ -427,8 +438,16 @@ class InquirerControl(FormattedTextControl):
)
tokens.append(("class:text", " Answer: {}".format(answer)))
- else:
+
+ show_description = self.show_description and current.description is not None
+ if show_description:
+ tokens.append(
+ ("class:text", " Description: {}".format(current.description))
+ )
+
+ if not (self.show_selected or show_description):
tokens.pop() # Remove last newline.
+
return tokens
def is_selection_a_separator(self) -> bool:
diff --git a/questionary/prompts/select.py b/questionary/prompts/select.py
index d6d41c5..402acbf 100644
--- a/questionary/prompts/select.py
+++ b/questionary/prompts/select.py
@@ -35,6 +35,7 @@ def select(
use_jk_keys: bool = True,
use_emacs_keys: bool = True,
show_selected: bool = False,
+ show_description: bool = True,
instruction: Optional[str] = None,
**kwargs: Any,
) -> Question:
@@ -110,6 +111,8 @@ def select(
show_selected: Display current selection choice at the bottom of list.
+ show_description: Display description of current selection if available.
+
Returns:
:class:`Question`: Question instance, ready to be prompted (using ``.ask()``).
"""
@@ -150,6 +153,7 @@ def select(
use_indicator=use_indicator,
use_shortcuts=use_shortcuts,
show_selected=show_selected,
+ show_description=show_description,
use_arrow_keys=use_arrow_keys,
initial_choice=default,
)
@@ -200,7 +204,7 @@ def select(
"for movement are disabled. "
"This choice is not reachable.".format(c.title)
)
- if isinstance(c, Separator) or c.shortcut_key is None:
+ if isinstance(c, Separator) or c.shortcut_key is None or c.disabled:
continue
# noinspection PyShadowingNames
|
tmbo/questionary
|
e74e9fadf43372085486083f8cc33b5af9ffdc54
|
diff --git a/tests/prompts/test_common.py b/tests/prompts/test_common.py
index d6ce51e..3250079 100644
--- a/tests/prompts/test_common.py
+++ b/tests/prompts/test_common.py
@@ -222,3 +222,43 @@ def test_print_with_style(monkeypatch):
assert mock.method_calls[1][0] == "write"
assert mock.method_calls[1][1][0] == "Hello World"
+
+
+def test_prompt_show_description():
+ ic = InquirerControl(
+ ["a", Choice("b", description="B")],
+ show_selected=True,
+ show_description=True,
+ )
+
+ expected_tokens = [
+ ("class:pointer", " » "),
+ ("[SetCursorPosition]", ""),
+ ("class:text", "○ "),
+ ("class:highlighted", "a"),
+ ("", "\n"),
+ ("class:text", " "),
+ ("class:text", "○ "),
+ ("class:text", "b"),
+ ("", "\n"),
+ ("class:text", " Answer: a"),
+ ]
+ assert ic.pointed_at == 0
+ assert ic._get_choice_tokens() == expected_tokens
+
+ ic.select_next()
+ expected_tokens = [
+ ("class:text", " "),
+ ("class:text", "○ "),
+ ("class:text", "a"),
+ ("", "\n"),
+ ("class:pointer", " » "),
+ ("[SetCursorPosition]", ""),
+ ("class:text", "○ "),
+ ("class:highlighted", "b"),
+ ("", "\n"),
+ ("class:text", " Answer: b"),
+ ("class:text", " Description: B"),
+ ]
+ assert ic.pointed_at == 1
+ assert ic._get_choice_tokens() == expected_tokens
|
[ENH] Include `show_description` to `InquirerControl`
### Describe the problem
Well done on the project! I've been utilizing it extensively for dynamic CLI design.
Sometimes, CLI or workflow questions can be intricate, and supplying additional information about the options can substantially improve the user experience. I suggest adding a `show_description` option to the `InquirerControl` class. This option would let developers attach a brief description to the currently selected option, displayed at the bottom of the current question, akin to the `show_selected` feature.
Integrating this option would let users quickly understand each option's purpose and make informed decisions, leading to a more user-friendly and efficient interface. It would support more informative interactions in CLIs and advanced workflows without any downsides.
For example, using the example from the [select docs](https://questionary.readthedocs.io/en/stable/pages/types.html#select), we could have something like:
```python
import questionary
choice_options = [
{
"name": "Medium",
"value": "$15",
"description": "Available toppings: cheese, pepperoni, sausage"
},
{
"name": "Big",
"value": "$20",
"description": "Available toppings: cheese, mushrooms, onions, peppers, pineapple, ham"
},
]
question = select(
"Select a pizza size: (see descriptions for toppings)",
choices=choice_options,
    show_description=True,
).ask()
```

### Describe the solution
- Extend `Choice` to receive the extra `description` attribute
- Extend `questionary.select` and `InquirerControl` to accept the new `show_description` flag (see the sketch below)
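A minimal sketch of the proposed API — the `description` keyword on `Choice` and the `show_description` flag are the additions requested here, not existing questionary features:
```python
from questionary import Choice, select

choices = [
    Choice(
        title="Medium",
        value="$15",
        description="Available toppings: cheese, pepperoni, sausage",
    ),
    Choice(
        title="Big",
        value="$20",
        description="Available toppings: cheese, mushrooms, onions, peppers",
    ),
]

size = select(
    "Select a pizza size: (see descriptions for toppings)",
    choices=choices,
    show_description=True,  # proposed flag, forwarded to InquirerControl
).ask()
```
Passing `Choice` objects instead of plain dicts keeps the new `description` field explicit.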
### Alternatives considered
_No response_
|
0.0
|
e74e9fadf43372085486083f8cc33b5af9ffdc54
|
[
"tests/prompts/test_common.py::test_prompt_show_description"
] |
[
"tests/prompts/test_common.py::test_to_many_choices_for_shortcut_assignment",
"tests/prompts/test_common.py::test_validator_bool_function",
"tests/prompts/test_common.py::test_validator_bool_function_fails",
"tests/prompts/test_common.py::test_validator_instance",
"tests/prompts/test_common.py::test_validator_instance_fails",
"tests/prompts/test_common.py::test_blank_line_fix",
"tests/prompts/test_common.py::test_prompt_highlight_coexist",
"tests/prompts/test_common.py::test_prompt_show_answer_with_shortcuts",
"tests/prompts/test_common.py::test_print",
"tests/prompts/test_common.py::test_print_with_style"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-19 09:02:06+00:00
|
mit
| 5,924 |
|
tmcclintock__BetterViolinPlots-5
|
diff --git a/bvp/plots.py b/bvp/plots.py
index 1e3bd15..63ca96a 100644
--- a/bvp/plots.py
+++ b/bvp/plots.py
@@ -17,20 +17,31 @@ def _xy_order(domain: List, dist: List, vertical_violin: bool):
return domain, dist
-def _plot_from_x_dist(axis, x, y, index, kwargs, vertical_violins):
+def _plot_from_x_dist(
+ axis, x, y, index, kwargs, vertical_violins, sides="both"
+):
scale = 0.4 / y.max()
# left side
- axis.plot(
- *_xy_order(x, index - y * scale, vertical_violins), **kwargs,
- )
- # right side
- axis.plot(
- *_xy_order(x, index + y * scale, vertical_violins), **kwargs,
- )
+ if sides in ["both", "left", "top"]:
+ axis.plot(
+ *_xy_order(x, index - y * scale, vertical_violins), **kwargs,
+ )
+ if sides in ["both", "right", "bottom"]:
+ # right side
+ axis.plot(
+ *_xy_order(x, index + y * scale, vertical_violins), **kwargs,
+ )
return
-def _preamble(data, axis, plot_kwargs, positions, vertical_violins):
+def _preamble(
+ data, axis, plot_kwargs, positions, vertical_violins, sides="both"
+):
+ if vertical_violins is True:
+ assert sides in ["both", "left", "right"]
+ else: # horizontal violins
+ assert sides in ["both", "top", "bottom"]
+
if axis is None:
fig, axis = plt.subplots()
else:
@@ -58,6 +69,7 @@ def analytic_violin(
positions: Optional[List[int]] = None,
axis: Optional["mpl.axes.Axes"] = None,
vertical_violins: bool = True,
+ sides: str = "both",
plot_kwargs: Union[Dict[str, Any], List[Dict[str, Any]]] = {
"color": "black",
},
@@ -86,6 +98,7 @@ def analytic_violin(
positions (Optional[List[int]]): locations to plot the violins
axis (mpl.axes.Axes): axis to use for plotting, default `None`
vertical_violins (bool): flag to indicate orientation
+ sides (str): string to indicate where to put the plot
plot_kwargs (Dict or List): if Dict, a dictionary of keyword-value
pairs to pass to each plot routine. If List, it is a list of
Dict objects to pass, one for each plot routine
@@ -94,7 +107,7 @@ def analytic_violin(
interval (Optional[List[float]]): plotting interval; default `None`
"""
fig, axis, positions = _preamble(
- distributions, axis, plot_kwargs, positions, vertical_violins
+ distributions, axis, plot_kwargs, positions, vertical_violins, sides,
)
if sigma is not None and interval is not None:
@@ -131,11 +144,15 @@ def analytic_violin(
for j in range(1, len(xs)):
x = np.hstack((x, [xs[j], xs[j] + 1]))
y = np.hstack((y, [ys[j] * scale, ys[j] * scale]))
- _plot_from_x_dist(axis, x, y, i, kwargs, vertical_violins)
+ _plot_from_x_dist(
+ axis, x, y, i, kwargs, vertical_violins, sides
+ )
elif isinstance(d.dist, rv_continuous):
x = np.linspace(min(interval), max(interval), 1000)
y = d.pdf(x)
- _plot_from_x_dist(axis, x, y, i, kwargs, vertical_violins)
+ _plot_from_x_dist(
+ axis, x, y, i, kwargs, vertical_violins, sides
+ )
else: # need to do random draws
raise NotImplementedError(
"only scipy.stats distributions supported"
@@ -153,6 +170,7 @@ def kde_violin(
positions: Optional[List[int]] = None,
axis: Optional["mpl.axes.Axes"] = None,
vertical_violins: bool = True,
+ sides: str = "both",
plot_kwargs: Union[Dict[str, Any], List[Dict[str, Any]]] = {
"color": "black",
},
@@ -197,7 +215,7 @@ def kde_violin(
points = np.atleast_2d(points)
fig, axis, positions = _preamble(
- points, axis, plot_kwargs, positions, vertical_violins
+ points, axis, plot_kwargs, positions, vertical_violins, sides
)
if sigma is not None and interval is not None:
@@ -230,7 +248,7 @@ def kde_violin(
# Make the domain and range
x = np.linspace(min(interval), max(interval), 1000)
y = kde(x)
- _plot_from_x_dist(axis, x, y, i, kwargs, vertical_violins)
+ _plot_from_x_dist(axis, x, y, i, kwargs, vertical_violins, sides)
return fig, axis
|
tmcclintock/BetterViolinPlots
|
3d94f7285ea7af3a6c599d9c3c5344e6d4e4f661
|
diff --git a/tests/test_plots.py b/tests/test_plots.py
index 5eb67f4..ab3b8b7 100644
--- a/tests/test_plots.py
+++ b/tests/test_plots.py
@@ -36,6 +36,14 @@ class analytic_violin_test(TestCase):
with pytest.raises(AssertionError):
analytic_violin(self.dists, plot_kwargs=[{}, {}, {}])
+ def test_sides_asserts(self):
+ with pytest.raises(AssertionError):
+ analytic_violin(self.dists, sides="top", vertical_violins=True)
+ with pytest.raises(AssertionError):
+ analytic_violin(self.dists, sides="left", vertical_violins=False)
+ with pytest.raises(AssertionError):
+ analytic_violin(self.dists, sides="blag")
+
class kde_violin_test(TestCase):
def setUp(self):
@@ -56,6 +64,14 @@ class kde_violin_test(TestCase):
with pytest.raises(AssertionError):
kde_violin(self.samples, plot_kwargs=[{}, {}, {}])
+ def test_sides_asserts(self):
+ with pytest.raises(AssertionError):
+ kde_violin(self.dists, sides="top", vertical_violins=True)
+ with pytest.raises(AssertionError):
+ kde_violin(self.dists, sides="left", vertical_violins=False)
+ with pytest.raises(AssertionError):
+ kde_violin(self.dists, sides="blag")
+
class boxplot_test(TestCase):
def setUp(self):
|
Add single-sided violins
Add an argument to all violin plotting functions that lets the caller draw only the left, right, top, or bottom side of each violin (depending on the orientation).
For instance:
```python
def analytic_violin(..., sides="both", vertical_violins=True):
if vertical_violins is True:
assert sides in ["both", "left", "right"]
else: # horizontal violins
assert sides in ["both", "top", "bottom"]
# 'sides' affects the plotting function as well.
...
```
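As a rough usage sketch of the feature asked for above — assuming `analytic_violin` is imported from `bvp.plots` (the module path in this repo) and that scipy frozen distributions are accepted, as in the existing smoke tests:
```python
import matplotlib.pyplot as plt
from scipy import stats

from bvp.plots import analytic_violin  # module path from this repo (bvp/plots.py)

# Draw only the left half of each vertical violin.
dists = [stats.norm(), stats.expon()]
fig, axis = analytic_violin(dists, sides="left", vertical_violins=True)
plt.show()
```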
|
0.0
|
3d94f7285ea7af3a6c599d9c3c5344e6d4e4f661
|
[
"tests/test_plots.py::analytic_violin_test::test_sides_asserts",
"tests/test_plots.py::kde_violin_test::test_sides_asserts"
] |
[
"tests/test_plots.py::analytic_violin_test::test_asserts",
"tests/test_plots.py::analytic_violin_test::test_smoke_continuous",
"tests/test_plots.py::analytic_violin_test::test_smoke_discrete",
"tests/test_plots.py::kde_violin_test::test_asserts",
"tests/test_plots.py::kde_violin_test::test_smoke_continuous",
"tests/test_plots.py::boxplot_test::test_asserts",
"tests/test_plots.py::boxplot_test::test_smoke_basic",
"tests/test_plots.py::boxplot_test::test_smoke_kwargs",
"tests/test_plots.py::_xy_order_test::test_correct_orders"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-12 23:09:04+00:00
|
mit
| 5,925 |
|
tmcclintock__donjuan-24
|
diff --git a/donjuan/__init__.py b/donjuan/__init__.py
index c959a4b..12928b9 100644
--- a/donjuan/__init__.py
+++ b/donjuan/__init__.py
@@ -6,4 +6,5 @@ from .door_space import Archway, Door, DoorSpace, Portcullis
from .dungeon import Dungeon
from .face import BareFace, DoorFace, Face, Faces, HexFaces, SquareFaces
from .grid import Grid, HexGrid, SquareGrid
+from .randomizer import RandomFilled, Randomizer
from .renderer import BaseRenderer, Renderer
diff --git a/donjuan/randomizer.py b/donjuan/randomizer.py
new file mode 100644
index 0000000..5b50d59
--- /dev/null
+++ b/donjuan/randomizer.py
@@ -0,0 +1,43 @@
+import random
+from typing import Optional
+
+from donjuan import Cell, Grid
+
+
+class Randomizer:
+ """
+ Class for randomizing features of a dungeon.
+ """
+
+ def randomize_cell(self, cell: Cell) -> None:
+ """Randomize properties of the `Cell`"""
+ pass # pragma: no cover
+
+ def randomize_grid(self, grid: Grid) -> None:
+ """Randomize properties of the `Grid`"""
+ pass # pragma: no cover
+
+ @classmethod
+ def seed(cls, seed: Optional[int] = None) -> None:
+ """
+ Args:
+ seed (Optional[int]): seed passed to :meth:`random.seed`
+ """
+ random.seed(seed)
+
+
+class RandomFilled(Randomizer):
+ """
+ Randomly set the :attr:`filled` attribute of cells.
+ """
+
+ def randomize_cell(self, cell: Cell) -> None:
+ """Randomly fill the cell with probability 50%"""
+ cell.filled = bool(random.randint(0, 1))
+
+ def randomize_grid(self, grid: Grid) -> None:
+ """Randomly fill all cells of the grid individually"""
+ for i in range(grid.n_rows):
+ for j in range(grid.n_cols):
+ self.randomize_cell(grid.cells[i][j])
+ return
diff --git a/examples/random_fill.py b/examples/random_fill.py
new file mode 100644
index 0000000..44378f3
--- /dev/null
+++ b/examples/random_fill.py
@@ -0,0 +1,22 @@
+import os
+
+import matplotlib as mpl
+
+from donjuan import Dungeon, RandomFilled, Renderer
+
+# Instantiate donjuan objects
+renderer = Renderer()
+dungeon = Dungeon(n_rows=4, n_cols=5)
+rng = RandomFilled()
+rng.randomize_grid(dungeon.grid)
+
+# Render the image
+file_path = "test.png"
+renderer.render(dungeon, file_path)
+
+# Look at it
+mpl.image.imread(file_path)
+mpl.pyplot.show()
+
+# Delete the image from on disk
+os.remove(file_path)
|
tmcclintock/donjuan
|
3355bdd64e810f382e9293978a6622641f85cd86
|
diff --git a/tests/randomizer_test.py b/tests/randomizer_test.py
new file mode 100644
index 0000000..6751161
--- /dev/null
+++ b/tests/randomizer_test.py
@@ -0,0 +1,54 @@
+from unittest import TestCase
+
+from donjuan import HexGrid, RandomFilled, Randomizer, SquareGrid
+
+
+class RandomizerTestCase(TestCase):
+ def setUp(self):
+ self.grid = SquareGrid(n_rows=4, n_cols=5)
+ self.hexgrid = HexGrid(n_rows=4, n_cols=5)
+
+
+class RandomizerTest(RandomizerTestCase):
+ def test_smoke(self):
+ rng = Randomizer()
+ assert rng is not None
+
+ def test_seed_passes(self):
+ Randomizer.seed(0)
+
+
+class RandomFilledTest(RandomizerTestCase):
+ def test_smoke(self):
+ rng = RandomFilled()
+ assert rng is not None
+
+ def test_filled(self):
+ rng = RandomFilled()
+ rng.seed(12345)
+ grid = self.grid
+ for i in range(grid.n_rows):
+ for j in range(grid.n_cols):
+ assert not grid.cells[i][j].filled
+ rng.randomize_grid(grid)
+ # Test that at least one cell became filled
+ for i in range(grid.n_rows):
+ for j in range(grid.n_cols):
+ if grid.cells[i][j].filled:
+ break
+ assert grid.cells[i][j].filled
+
+ def test_filled_hex(self):
+ rng = RandomFilled()
+ rng.seed(12345)
+ grid = self.hexgrid
+ for i in range(grid.n_rows):
+ for j in range(grid.n_cols):
+ assert not grid.cells[i][j].filled
+ rng.randomize_grid(grid)
+ # Test that at least one cell became filled
+ for i in range(grid.n_rows):
+ for j in range(grid.n_cols):
+ if grid.cells[i][j].filled:
+ break
+ assert grid.cells[i][j].filled
|
Create the Randomizer class
The `Randomizer` class should handle actually randomizing parts of the `Dungeon`. It should do things like create rooms and hallways, add `DoorSpace` objects, and add content to `Cell`s (e.g. light sources, chests, tokens/actors, whatever).
This is a placeholder issue that can be specified once more of the `Dungeon` class is complete.
Note: the `Randomizer` class should have "seed" as an optional argument. This will allow regenerating the same dungeons repeatedly.
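A minimal sketch of the shape described above, assuming only what the issue states (optional seed, per-cell randomization); the real class would also grow hooks for rooms, hallways, and `DoorSpace` placement:
```python
import random
from typing import Optional


class Randomizer:
    """Randomizes parts of a Dungeon; seeding makes generation repeatable."""

    def __init__(self, seed: Optional[int] = None):
        random.seed(seed)

    def randomize_cell(self, cell) -> None:
        # Placeholder behaviour: randomly fill the cell.
        cell.filled = bool(random.getrandbits(1))
```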
|
0.0
|
3355bdd64e810f382e9293978a6622641f85cd86
|
[
"tests/randomizer_test.py::RandomizerTest::test_seed_passes",
"tests/randomizer_test.py::RandomizerTest::test_smoke",
"tests/randomizer_test.py::RandomFilledTest::test_filled",
"tests/randomizer_test.py::RandomFilledTest::test_filled_hex",
"tests/randomizer_test.py::RandomFilledTest::test_smoke"
] |
[] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-12-22 02:47:36+00:00
|
cc0-1.0
| 5,926 |
|
tmcclintock__donjuan-27
|
diff --git a/donjuan/__init__.py b/donjuan/__init__.py
index 12928b9..c3f509d 100644
--- a/donjuan/__init__.py
+++ b/donjuan/__init__.py
@@ -8,3 +8,4 @@ from .face import BareFace, DoorFace, Face, Faces, HexFaces, SquareFaces
from .grid import Grid, HexGrid, SquareGrid
from .randomizer import RandomFilled, Randomizer
from .renderer import BaseRenderer, Renderer
+from .room import Room
diff --git a/donjuan/cell.py b/donjuan/cell.py
index f008631..2f0bcb3 100644
--- a/donjuan/cell.py
+++ b/donjuan/cell.py
@@ -1,5 +1,5 @@
from abc import ABC
-from typing import Any, List, Optional
+from typing import Any, List, Optional, Tuple
from donjuan.door_space import DoorSpace
from donjuan.face import Faces, HexFaces, SquareFaces
@@ -18,11 +18,28 @@ class Cell(ABC):
filled: bool = False,
door_space: Optional[DoorSpace] = None,
contents: Optional[List[Any]] = None,
+ coordinates: Optional[Tuple[int, int]] = None,
):
self.faces = faces
self.filled = filled
self.door_space = door_space
self.contents = contents or []
+ self._coordinates = coordinates
+
+ def set_coordinates(self, x: int, y: int) -> None:
+ self._coordinates = (int(x), int(y))
+
+ @property
+ def coordinates(self) -> Tuple[int, int]:
+ return self._coordinates
+
+ @property
+ def x(self) -> int:
+ return self._coordinates[0]
+
+ @property
+ def y(self) -> int:
+ return self._coordinates[1]
@property
def n_sides(self) -> int:
@@ -47,10 +64,15 @@ class SquareCell(Cell):
filled: bool = False,
door_space: Optional[DoorSpace] = None,
contents: Optional[List[Any]] = None,
+ coordinates: Optional[Tuple[int, int]] = None,
):
faces = faces or SquareFaces()
super().__init__(
- faces=faces, filled=filled, door_space=door_space, contents=contents
+ faces=faces,
+ filled=filled,
+ door_space=door_space,
+ contents=contents,
+ coordinates=coordinates,
)
@@ -72,8 +94,13 @@ class HexCell(Cell):
filled: bool = False,
door_space: Optional[DoorSpace] = None,
contents: Optional[List[Any]] = None,
+ coordinates: Optional[Tuple[int, int]] = None,
):
faces = faces or HexFaces()
super().__init__(
- faces=faces, filled=filled, door_space=door_space, contents=contents
+ faces=faces,
+ filled=filled,
+ door_space=door_space,
+ contents=contents,
+ coordinates=coordinates,
)
diff --git a/donjuan/dungeon.py b/donjuan/dungeon.py
index c2b9fce..2e3ffb7 100644
--- a/donjuan/dungeon.py
+++ b/donjuan/dungeon.py
@@ -1,6 +1,7 @@
-from typing import Optional
+from typing import Dict, Optional
from donjuan.grid import Grid, SquareGrid
+from donjuan.room import Room
class Dungeon:
@@ -9,5 +10,15 @@ class Dungeon:
n_rows: Optional[int] = 5,
n_cols: Optional[int] = 5,
grid: Optional[Grid] = None,
+ rooms: Dict[str, Room] = dict(),
):
- self.grid = grid if grid else SquareGrid(n_rows, n_cols)
+ self._grid = grid or SquareGrid(n_rows, n_cols)
+ self._rooms = rooms
+
+ @property
+ def grid(self) -> Grid:
+ return self._grid
+
+ @property
+ def rooms(self) -> Dict[str, Room]:
+ return self._rooms
diff --git a/donjuan/grid.py b/donjuan/grid.py
index 3f04e0b..5a139ac 100644
--- a/donjuan/grid.py
+++ b/donjuan/grid.py
@@ -16,7 +16,8 @@ class Grid(ABC):
assert n_rows > 1
assert n_cols > 1
cells = cells or [
- [self.cell_type() for i in range(n_cols)] for j in range(n_rows)
+ [self.cell_type(coordinates=(i, j)) for j in range(n_cols)]
+ for i in range(n_rows)
]
assert len(cells) == n_rows, f"{len(cells)} vs {n_rows}"
assert len(cells[0]) == n_cols, f"{len(cells[0])} vs {n_cols}"
@@ -56,6 +57,17 @@ class Grid(ABC):
assert isinstance(cells[0][0], cls.cell_type), msg
return cls(len(cells), len(cells[0]), cells)
+ def reset_cell_coordinates(self) -> None:
+ """
+ Helper function that sets the coordinates of the cells in the grid
+ to their index values. Useful if a grid was created by
+ :meth:`from_cells`.
+ """
+ for i in range(self.n_rows):
+ for j in range(self.n_cols):
+ self.cells[i][j].set_coordinates(i, j)
+ return
+
class SquareGrid(Grid):
"""
diff --git a/donjuan/room.py b/donjuan/room.py
new file mode 100644
index 0000000..8e36b52
--- /dev/null
+++ b/donjuan/room.py
@@ -0,0 +1,36 @@
+from itertools import chain
+from typing import List, Optional
+
+from donjuan import Cell
+
+
+class Room:
+ def __init__(self, cells: Optional[List[List[Cell]]] = None):
+ self._cells = cells or [[]]
+ for cell in chain.from_iterable(self._cells):
+ assert cell.coordinates is not None, "room cell must have coordinates"
+
+ @property
+ def cells(self) -> List[List[Cell]]:
+ return self._cells
+
+ def overlaps(self, other: "Room") -> bool:
+ """
+ Compare the cells of this room to the other room to determine
+ whether they overlap or not. Note, this algorithm is ``O(N*M)``
+ where ``N`` is the number of cells in this room and ``M`` is
+ the number of cells in the other room.
+
+ Args:
+ other (Room): other room to check against
+
+ Returns:
+ ``True`` if they overlap, ``False`` if not
+ """
+ # Loop over all of this room's cells
+ for c1 in chain.from_iterable(self.cells):
+ for c2 in chain.from_iterable(other.cells):
+ if c1.coordinates == c2.coordinates:
+ return True
+ # No overlap
+ return False
|
tmcclintock/donjuan
|
412d275b8441cf88a1dbdfb3791737fd781d946d
|
diff --git a/tests/cell_test.py b/tests/cell_test.py
index d5a1437..0dbee62 100644
--- a/tests/cell_test.py
+++ b/tests/cell_test.py
@@ -22,6 +22,14 @@ class SquareCellTest(TestCase):
c = SquareCell()
assert c.n_sides == 4
+ def test_coordinates(self):
+ c = SquareCell()
+ assert c.coordinates is None
+ c.set_coordinates(1, 2)
+ assert c.coordinates == (1, 2)
+ assert c.x == 1
+ assert c.y == 2
+
class HexCellTest(TestCase):
def test_smoke(self):
@@ -35,3 +43,11 @@ class HexCellTest(TestCase):
def test_n_sides(self):
c = HexCell()
assert c.n_sides == 6
+
+ def test_coordinates(self):
+ c = HexCell()
+ assert c.coordinates is None
+ c.set_coordinates(1, 2)
+ assert c.coordinates == (1, 2)
+ assert c.x == 1
+ assert c.y == 2
diff --git a/tests/dungeon_test.py b/tests/dungeon_test.py
index f4649a9..733ba44 100644
--- a/tests/dungeon_test.py
+++ b/tests/dungeon_test.py
@@ -1,6 +1,6 @@
from unittest import TestCase
-from donjuan import Dungeon, SquareGrid
+from donjuan import Dungeon, HexGrid, SquareGrid
class DungeonTest(TestCase):
@@ -8,6 +8,17 @@ class DungeonTest(TestCase):
d = Dungeon()
assert d is not None
+ def test_initial_attributes(self):
+ d = Dungeon()
+ assert d.rooms == {}
+
+ def test_hex_grid(self):
+ hg = HexGrid(4, 5)
+ d = Dungeon(grid=hg)
+ assert isinstance(d.grid, HexGrid)
+ assert d.grid.n_rows == 4
+ assert d.grid.n_cols == 5
+
def test_pass_dimensions(self):
d = Dungeon(n_rows=4, n_cols=5)
assert d.grid.n_rows == 4
diff --git a/tests/grid_test.py b/tests/grid_test.py
index d1bd39e..b0f5d0f 100644
--- a/tests/grid_test.py
+++ b/tests/grid_test.py
@@ -20,24 +20,41 @@ class SquareGridTest(TestCase):
assert sg.n_rows == 5
assert isinstance(sg.cells[0][0], SquareCell)
+ def test_cell_coordinates(self):
+ sg = SquareGrid(5, 4)
+ for i in range(sg.n_rows):
+ for j in range(sg.n_cols):
+ assert sg.cells[i][j].coordinates == (i, j)
+
def test_get_filled_grid(self):
- sg = SquareGrid(5, 5)
+ sg = SquareGrid(5, 4)
fg = sg.get_filled_grid()
assert all(fg)
def test_get_filled_grid_some_unfilled(self):
- sg = SquareGrid(5, 5)
+ sg = SquareGrid(5, 4)
for i in range(5):
sg.cells[i][3].filled = True
fg = sg.get_filled_grid()
for i in range(5):
- for j in range(5):
+ for j in range(4):
assert fg[i][j] == sg.cells[i][j].filled, (i, j)
if j != 3:
assert not fg[i][j], (i, j)
else:
assert fg[i][j], (i, j)
+ def test_reset_cell_coordinates(self):
+ cells = [[SquareCell() for i in range(4)] for j in range(5)]
+ sg = SquareGrid.from_cells(cells)
+ for i in range(sg.n_rows):
+ for j in range(sg.n_cols):
+ assert sg.cells[i][j].coordinates is None
+ sg.reset_cell_coordinates()
+ for i in range(sg.n_rows):
+ for j in range(sg.n_cols):
+ assert sg.cells[i][j].coordinates == (i, j)
+
class HexGridTest(TestCase):
def test_smoke(self):
diff --git a/tests/room_test.py b/tests/room_test.py
new file mode 100644
index 0000000..8d7e5fa
--- /dev/null
+++ b/tests/room_test.py
@@ -0,0 +1,34 @@
+from copy import deepcopy
+from unittest import TestCase
+
+import pytest
+
+from donjuan import Room, SquareCell
+
+
+class RoomTest(TestCase):
+ def test_smoke(self):
+ r = Room()
+ assert r is not None
+ assert r.cells == [[]]
+
+ def test_assert_cell_coords(self):
+ c = SquareCell()
+ with pytest.raises(AssertionError):
+ Room(cells=[[c]])
+
+ def test_overlaps(self):
+ cs = [[SquareCell(coordinates=(i, j)) for j in range(5)] for i in range(4)]
+ r1 = Room(cs)
+ r2 = Room(deepcopy(cs))
+ assert r1.overlaps(r2)
+
+ def test_no_overlap(self):
+ cs = [[SquareCell(coordinates=(i, j)) for j in range(5)] for i in range(4)]
+ r1 = Room(cs)
+ cs2 = deepcopy(cs)
+ for i in range(len(cs)):
+ for j in range(len(cs[0])):
+ cs2[i][j].set_coordinates(100 + i, j)
+ r2 = Room(cs2)
+ assert not r1.overlaps(r2)
|
Create a Room class
A `Room` class should function similarly to a `Grid`, except it shouldn't have to be square (it would be cool to make funky-shaped rooms). The `Dungeon` should also have a dictionary of rooms.
This issue should be expanded on once thought is put into what attributes it should have.
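A rough sketch of what a non-rectangular `Room` plus the `Dungeon` room dictionary could look like; names beyond `Room`, `Cell`, and `Dungeon` are illustrative:
```python
from typing import Dict, List, Optional


class Room:
    """A room is just a collection of cells, so any shape works."""

    def __init__(self, cells: Optional[List["Cell"]] = None):
        self.cells = cells or []


class Dungeon:
    def __init__(self, rooms: Optional[Dict[str, Room]] = None):
        # Named rooms, as the issue suggests.
        self.rooms = rooms or {}
```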
|
0.0
|
412d275b8441cf88a1dbdfb3791737fd781d946d
|
[
"tests/cell_test.py::SquareCellTest::test_coordinates",
"tests/cell_test.py::SquareCellTest::test_faces",
"tests/cell_test.py::SquareCellTest::test_filled",
"tests/cell_test.py::SquareCellTest::test_n_sides",
"tests/cell_test.py::SquareCellTest::test_smoke",
"tests/cell_test.py::HexCellTest::test_coordinates",
"tests/cell_test.py::HexCellTest::test_faces",
"tests/cell_test.py::HexCellTest::test_n_sides",
"tests/cell_test.py::HexCellTest::test_smoke",
"tests/dungeon_test.py::DungeonTest::test_hex_grid",
"tests/dungeon_test.py::DungeonTest::test_initial_attributes",
"tests/dungeon_test.py::DungeonTest::test_pass_dimensions",
"tests/dungeon_test.py::DungeonTest::test_pass_grid",
"tests/dungeon_test.py::DungeonTest::test_smoke",
"tests/grid_test.py::SquareGridTest::test_cell_coordinates",
"tests/grid_test.py::SquareGridTest::test_from_cells",
"tests/grid_test.py::SquareGridTest::test_get_filled_grid",
"tests/grid_test.py::SquareGridTest::test_get_filled_grid_some_unfilled",
"tests/grid_test.py::SquareGridTest::test_reset_cell_coordinates",
"tests/grid_test.py::SquareGridTest::test_smoke",
"tests/grid_test.py::SquareGridTest::test_smoke_all_args",
"tests/grid_test.py::HexGridTest::test_from_cells",
"tests/grid_test.py::HexGridTest::test_smoke",
"tests/grid_test.py::HexGridTest::test_smoke_all_args",
"tests/room_test.py::RoomTest::test_assert_cell_coords",
"tests/room_test.py::RoomTest::test_no_overlap",
"tests/room_test.py::RoomTest::test_overlaps",
"tests/room_test.py::RoomTest::test_smoke"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-12-25 20:17:45+00:00
|
cc0-1.0
| 5,927 |
|
tmux-python__libtmux-394
|
diff --git a/CHANGES b/CHANGES
index 5ff9a81..6584792 100644
--- a/CHANGES
+++ b/CHANGES
@@ -12,6 +12,16 @@ $ pip install --user --upgrade --pre libtmux
- _Insert changes/features/fixes for next release here_
+### Tests and docs
+
+- Initial [doctests] examples stubbed out {issue}`#394`
+
+ [doctests]: https://docs.python.org/3/library/doctest.html
+
+- Fix bug in `temp_window()` context manager, {issue}`#394`
+- Pytest configuration `conftest.py` moved to `libtmux/conftest.py`, so doctest can
+ detect the fixtures {issue}`#394`
+
## libtmux 0.13.0 (2022-08-05)
### What's new
diff --git a/docs/conf.py b/docs/conf.py
index 7264b73..cb7f649 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -10,7 +10,7 @@ import libtmux # NOQA
from libtmux import test # NOQA
# Get the project root dir, which is the parent dir of this
-cwd = Path.cwd()
+cwd = Path(__file__).parent
project_root = cwd.parent
sys.path.insert(0, str(project_root))
@@ -18,7 +18,7 @@ sys.path.insert(0, str(cwd / "_ext"))
# package data
about: Dict[str, str] = {}
-with open("../libtmux/__about__.py") as fp:
+with open(project_root / "libtmux" / "__about__.py") as fp:
exec(fp.read(), about)
extensions = [
diff --git a/libtmux/pane.py b/libtmux/pane.py
index 2e93dda..191dade 100644
--- a/libtmux/pane.py
+++ b/libtmux/pane.py
@@ -36,6 +36,20 @@ class Pane(TmuxMappingObject):
----------
window : :class:`Window`
+ Examples
+ --------
+ >>> pane
+ Pane(%1 Window(@1 ...:..., Session($1 ...)))
+
+ >>> pane in window.panes
+ True
+
+ >>> pane.window
+ Window(@1 ...:..., Session($1 ...))
+
+ >>> pane.session
+ Session($1 ...)
+
Notes
-----
@@ -119,8 +133,7 @@ class Pane(TmuxMappingObject):
suppress_history: t.Optional[bool] = True,
literal: t.Optional[bool] = False,
) -> None:
- """
- ``$ tmux send-keys`` to the pane.
+ r"""``$ tmux send-keys`` to the pane.
A leading space character is added to cmd to avoid polluting the
user's history.
@@ -135,6 +148,22 @@ class Pane(TmuxMappingObject):
Don't add these keys to the shell history, default True.
literal : bool, optional
Send keys literally, default True.
+
+ Examples
+ --------
+ >>> pane = window.split_window(shell='sh')
+ >>> pane.capture_pane()
+ ['$']
+
+ >>> pane.send_keys('echo "Hello world"', suppress_history=False, enter=True)
+
+ >>> pane.capture_pane()
+ ['$ echo "Hello world"', 'Hello world', '$']
+
+ >>> print('\n'.join(pane.capture_pane())) # doctest: +NORMALIZE_WHITESPACE
+ $ echo "Hello world"
+ Hello world
+ $
"""
prefix = " " if suppress_history else ""
diff --git a/libtmux/server.py b/libtmux/server.py
index b31a3d0..beaa82a 100644
--- a/libtmux/server.py
+++ b/libtmux/server.py
@@ -47,6 +47,23 @@ class Server(TmuxRelationalObject["Session", "SessionDict"], EnvironmentMixin):
config_file : str, optional
colors : str, optional
+ Examples
+ --------
+ >>> server
+ <libtmux.server.Server object at ...>
+
+ >>> server.sessions
+ [Session($1 ...)]
+
+ >>> server.sessions[0].windows
+ [Window(@1 ...:..., Session($1 ...)]
+
+ >>> server.sessions[0].attached_window
+ Window(@1 ...:..., Session($1 ...)
+
+ >>> server.sessions[0].attached_pane
+ Pane(%1 Window(@1 ...:..., Session($1 ...)))
+
References
----------
.. [server_manual] CLIENTS AND SESSIONS. openbsd manpage for TMUX(1)
diff --git a/libtmux/session.py b/libtmux/session.py
index 1133c99..e2d1b61 100644
--- a/libtmux/session.py
+++ b/libtmux/session.py
@@ -43,6 +43,20 @@ class Session(
----------
server : :class:`Server`
+ Examples
+ --------
+ >>> session
+ Session($1 ...)
+
+ >>> session.windows
+ [Window(@1 ...:..., Session($1 ...)]
+
+ >>> session.attached_window
+ Window(@1 ...:..., Session($1 ...)
+
+ >>> session.attached_pane
+ Pane(%1 Window(@1 ...:..., Session($1 ...)))
+
References
----------
.. [session_manual] tmux session. openbsd manpage for TMUX(1).
diff --git a/libtmux/window.py b/libtmux/window.py
index addfae7..d38b829 100644
--- a/libtmux/window.py
+++ b/libtmux/window.py
@@ -39,6 +39,32 @@ class Window(TmuxMappingObject, TmuxRelationalObject["Pane", "PaneDict"]):
----------
session : :class:`Session`
+ Examples
+ --------
+ >>> window = session.new_window('My project')
+
+ >>> window
+ Window(@... ...:My project, Session($... ...))
+
+ Windows have panes:
+
+ >>> window.panes
+ [Pane(...)]
+
+ >>> window.attached_pane
+ Pane(...)
+
+ Relations moving up:
+
+ >>> window.session
+ Session(...)
+
+ >>> window == session.attached_window
+ True
+
+ >>> window in session.windows
+ True
+
References
----------
.. [window_manual] tmux window. openbsd manpage for TMUX(1).
@@ -296,6 +322,17 @@ class Window(TmuxMappingObject, TmuxRelationalObject["Pane", "PaneDict"]):
----------
new_name : str
name of the window
+
+ Examples
+ --------
+
+ >>> window = session.attached_window
+
+ >>> window.rename_window('My project')
+ Window(@1 ...:My project, Session($1 ...))
+
+ >>> window.rename_window('New name')
+ Window(@1 ...:New name, Session($1 ...))
"""
import shlex
diff --git a/setup.cfg b/setup.cfg
index b2e5f38..7139e23 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -19,4 +19,5 @@ line_length = 88
[tool:pytest]
filterwarnings =
ignore:.* Use packaging.version.*:DeprecationWarning::
-
+addopts = --tb=short --no-header --showlocals --doctest-modules
+doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
|
tmux-python/libtmux
|
e3895d094660b1814b85d8a36f0abb291b2f8f79
|
diff --git a/libtmux/conftest.py b/libtmux/conftest.py
new file mode 100644
index 0000000..b6e76a8
--- /dev/null
+++ b/libtmux/conftest.py
@@ -0,0 +1,119 @@
+import logging
+import os
+import typing as t
+
+import pytest
+
+from _pytest.fixtures import SubRequest
+from _pytest.monkeypatch import MonkeyPatch
+
+from libtmux import exc
+from libtmux.common import which
+from libtmux.server import Server
+from libtmux.test import TEST_SESSION_PREFIX, get_test_session_name, namer
+
+if t.TYPE_CHECKING:
+ from libtmux.session import Session
+
+logger = logging.getLogger(__name__)
+
+
[email protected](autouse=True)
+def clear_env(monkeypatch: MonkeyPatch) -> None:
+ """Clear out any unnecessary environment variables that could interrupt tests.
+
+ tmux show-environment tests were being interrupted due to a lot of crazy env vars.
+ """
+ for k, v in os.environ.items():
+ if not any(
+ needle in k.lower()
+ for needle in [
+ "window",
+ "tmux",
+ "pane",
+ "session",
+ "pytest",
+ "path",
+ "pwd",
+ "shell",
+ "home",
+ "xdg",
+ "disable_auto_title",
+ "lang",
+ "term",
+ ]
+ ):
+ monkeypatch.delenv(k)
+
+
[email protected](scope="function")
+def server(request: SubRequest, monkeypatch: MonkeyPatch) -> Server:
+
+ t = Server()
+ t.socket_name = "tmuxp_test%s" % next(namer)
+
+ def fin() -> None:
+ t.kill_server()
+
+ request.addfinalizer(fin)
+
+ return t
+
+
[email protected](scope="function")
+def session(request: SubRequest, server: Server) -> "Session":
+ session_name = "tmuxp"
+
+ if not server.has_session(session_name):
+ server.cmd("new-session", "-d", "-s", session_name)
+
+ # find current sessions prefixed with tmuxp
+ old_test_sessions = []
+ for s in server._sessions:
+ old_name = s.get("session_name")
+ if old_name is not None and old_name.startswith(TEST_SESSION_PREFIX):
+ old_test_sessions.append(old_name)
+
+ TEST_SESSION_NAME = get_test_session_name(server=server)
+
+ try:
+ session = server.new_session(session_name=TEST_SESSION_NAME)
+ except exc.LibTmuxException as e:
+ raise e
+
+ """
+ Make sure that tmuxp can :ref:`test_builder_visually` and switches to
+ the newly created session for that testcase.
+ """
+ session_id = session.get("session_id")
+ assert session_id is not None
+
+ try:
+ server.switch_client(target_session=session_id)
+ except exc.LibTmuxException:
+ # server.attach_session(session.get('session_id'))
+ pass
+
+ for old_test_session in old_test_sessions:
+ logger.debug("Old test test session %s found. Killing it." % old_test_session)
+ server.kill_session(old_test_session)
+ assert TEST_SESSION_NAME == session.get("session_name")
+ assert TEST_SESSION_NAME != "tmuxp"
+
+ return session
+
+
[email protected](autouse=True)
+def add_doctest_fixtures(
+ doctest_namespace: t.Dict[str, t.Any],
+ # usefixtures / autouse
+ clear_env: t.Any,
+ # Normal fixtures
+ server: "Server",
+ session: "Session",
+) -> None:
+ if which("tmux"):
+ doctest_namespace["server"] = server
+ doctest_namespace["session"] = session
+ doctest_namespace["window"] = session.attached_window
+ doctest_namespace["pane"] = session.attached_pane
diff --git a/libtmux/test.py b/libtmux/test.py
index 4af61b1..19e9a8b 100644
--- a/libtmux/test.py
+++ b/libtmux/test.py
@@ -16,6 +16,7 @@ logger = logging.getLogger(__name__)
if t.TYPE_CHECKING:
from libtmux.session import Session
+ from libtmux.window import Window
TEST_SESSION_PREFIX = "libtmux_"
RETRY_TIMEOUT_SECONDS = int(os.getenv("RETRY_TIMEOUT_SECONDS", 8))
@@ -68,16 +69,17 @@ def retry_until(
Examples
--------
- >>> def f():
- ... p = w.attached_pane
+ >>> def fn():
+ ... p = session.attached_window.attached_pane
... p.server._update_panes()
- ... return p.current_path == pane_path
- ...
- ... retry(f)
+ ... return p.current_path is not None
+
+ >>> retry_until(fn)
+ True
In pytest:
- >>> assert retry(f, raises=False)
+ >>> assert retry_until(fn, raises=False)
"""
ini = time.time()
@@ -179,6 +181,7 @@ def temp_session(
>>> with temp_session(server) as session:
... session.new_window(window_name='my window')
+ Window(@... ...:..., Session($... ...))
"""
if "session_name" in kwargs:
@@ -199,7 +202,7 @@ def temp_session(
@contextlib.contextmanager
def temp_window(
session: "Session", *args: t.Any, **kwargs: t.Any
-) -> t.Generator["Session", t.Any, t.Any]:
+) -> t.Generator["Window", t.Any, t.Any]:
"""
Return a context manager with a temporary window.
@@ -229,7 +232,13 @@ def temp_window(
--------
>>> with temp_window(session) as window:
- ... my_pane = window.split_window()
+ ... window
+ Window(@... ...:..., Session($... ...))
+
+
+ >>> with temp_window(session) as window:
+ ... window.split_window()
+ Pane(%... Window(@... ...:..., Session($... ...)))
"""
if "window_name" not in kwargs:
@@ -245,7 +254,7 @@ def temp_window(
assert isinstance(window_id, str)
try:
- yield session
+ yield window
finally:
if session.find_where({"window_id": window_id}):
window.kill_window()
diff --git a/tests/conftest.py b/tests/conftest.py
index 2a6d873..535e579 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,102 +1,1 @@
-import logging
-import os
-import typing as t
-
-import pytest
-
-from _pytest.fixtures import SubRequest
-from _pytest.monkeypatch import MonkeyPatch
-
-from libtmux import exc
-from libtmux.server import Server
-from libtmux.test import TEST_SESSION_PREFIX, get_test_session_name, namer
-
-if t.TYPE_CHECKING:
- from libtmux.session import Session
-
-logger = logging.getLogger(__name__)
-
-
[email protected](autouse=True)
-def clear_env(monkeypatch: MonkeyPatch) -> None:
- """Clear out any unnecessary environment variables that could interrupt tests.
-
- tmux show-environment tests were being interrupted due to a lot of crazy env vars.
- """
- for k, v in os.environ.items():
- if not any(
- needle in k.lower()
- for needle in [
- "window",
- "tmux",
- "pane",
- "session",
- "pytest",
- "path",
- "pwd",
- "shell",
- "home",
- "xdg",
- "disable_auto_title",
- "lang",
- "term",
- ]
- ):
- monkeypatch.delenv(k)
-
-
[email protected](scope="function")
-def server(request: SubRequest, monkeypatch: MonkeyPatch) -> Server:
-
- t = Server()
- t.socket_name = "tmuxp_test%s" % next(namer)
-
- def fin() -> None:
- t.kill_server()
-
- request.addfinalizer(fin)
-
- return t
-
-
[email protected](scope="function")
-def session(request: SubRequest, server: Server) -> "Session":
- session_name = "tmuxp"
-
- if not server.has_session(session_name):
- server.cmd("new-session", "-d", "-s", session_name)
-
- # find current sessions prefixed with tmuxp
- old_test_sessions = []
- for s in server._sessions:
- old_name = s.get("session_name")
- if old_name is not None and old_name.startswith(TEST_SESSION_PREFIX):
- old_test_sessions.append(old_name)
-
- TEST_SESSION_NAME = get_test_session_name(server=server)
-
- try:
- session = server.new_session(session_name=TEST_SESSION_NAME)
- except exc.LibTmuxException as e:
- raise e
-
- """
- Make sure that tmuxp can :ref:`test_builder_visually` and switches to
- the newly created session for that testcase.
- """
- session_id = session.get("session_id")
- assert session_id is not None
-
- try:
- server.switch_client(target_session=session_id)
- except exc.LibTmuxException:
- # server.attach_session(session.get('session_id'))
- pass
-
- for old_test_session in old_test_sessions:
- logger.debug("Old test test session %s found. Killing it." % old_test_session)
- server.kill_session(old_test_session)
- assert TEST_SESSION_NAME == session.get("session_name")
- assert TEST_SESSION_NAME != "tmuxp"
-
- return session
+from libtmux.conftest import * # noqa F40
|
Doctest examples
We should have doctest examples everywhere we can, even if it makes the tests 4x as long.
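For reference, a sketch of the kind of doctest meant here — `session_names` is a hypothetical helper, and the `server` object is assumed to be injected into the doctest namespace by a pytest fixture:
```python
def session_names(server):
    """Return the names of all sessions on *server*.

    >>> session = server.new_session(session_name="demo")
    >>> "demo" in session_names(server)
    True
    """
    return [s.get("session_name") for s in server.sessions]
```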
|
0.0
|
e3895d094660b1814b85d8a36f0abb291b2f8f79
|
[
"libtmux/test.py::libtmux.test.retry_until",
"libtmux/test.py::libtmux.test.temp_session",
"libtmux/test.py::libtmux.test.temp_window"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-05 22:10:21+00:00
|
mit
| 5,928 |
|
tobac-project__tobac-244
|
diff --git a/tobac/feature_detection.py b/tobac/feature_detection.py
index d72776a..d400362 100644
--- a/tobac/feature_detection.py
+++ b/tobac/feature_detection.py
@@ -942,6 +942,7 @@ def feature_detection_multithreshold(
dz=dz,
min_distance=min_distance,
z_coordinate_name=vertical_coord,
+ target=target,
)
list_features_timesteps.append(features_thresholds)
@@ -978,6 +979,7 @@ def filter_min_distance(
x_coordinate_name=None,
y_coordinate_name=None,
z_coordinate_name=None,
+ target="maximum",
):
"""Function to remove features that are too close together.
If two features are closer than `min_distance`, it keeps the
@@ -1008,6 +1010,9 @@ def filter_min_distance(
z_coordinate_name: str or None
The name of the z coordinate to calculate distance based on in meters.
This is typically `altitude`. If `auto`, tries to auto-detect.
+ target: {'maximum', 'minimum'}, optional
+        Flag to determine if tracking is targeting minima or maxima in
+ the data. Default is 'maximum'.
Returns
-------
@@ -1052,6 +1057,11 @@ def filter_min_distance(
"Set dz to none if you want to use altitude or set `z_coordinate_name` to None to use constant dz."
)
+ if target not in ["minimum", "maximum"]:
+ raise ValueError(
+ "target parameter must be set to either 'minimum' or 'maximum'"
+ )
+
# create list of tuples with all combinations of features at the timestep:
indices = combinations(features.index.values, 2)
# Loop over combinations to remove features that are closer together than min_distance and keep larger one (either higher threshold or larger area)
@@ -1092,25 +1102,56 @@ def filter_min_distance(
if distance <= min_distance:
# print(distance, min_distance, index_1, index_2, features.size)
# logging.debug('distance<= min_distance: ' + str(distance))
- if (
- features.loc[index_1, "threshold_value"]
- > features.loc[index_2, "threshold_value"]
- ):
- remove_list_distance.append(index_2)
- elif (
- features.loc[index_1, "threshold_value"]
- < features.loc[index_2, "threshold_value"]
- ):
- remove_list_distance.append(index_1)
- elif (
- features.loc[index_1, "threshold_value"]
- == features.loc[index_2, "threshold_value"]
- ):
- if features.loc[index_1, "num"] > features.loc[index_2, "num"]:
+ if target == "maximum":
+ if (
+ features.loc[index_1, "threshold_value"]
+ > features.loc[index_2, "threshold_value"]
+ ):
remove_list_distance.append(index_2)
- elif features.loc[index_1, "num"] < features.loc[index_2, "num"]:
+ elif (
+ features.loc[index_1, "threshold_value"]
+ < features.loc[index_2, "threshold_value"]
+ ):
remove_list_distance.append(index_1)
- elif features.loc[index_1, "num"] == features.loc[index_2, "num"]:
+ elif (
+ features.loc[index_1, "threshold_value"]
+ == features.loc[index_2, "threshold_value"]
+ ):
+ if features.loc[index_1, "num"] > features.loc[index_2, "num"]:
+ remove_list_distance.append(index_2)
+ elif (
+ features.loc[index_1, "num"] < features.loc[index_2, "num"]
+ ):
+ remove_list_distance.append(index_1)
+ elif (
+ features.loc[index_1, "num"] == features.loc[index_2, "num"]
+ ):
+ remove_list_distance.append(index_2)
+ elif target == "minimum":
+ if (
+ features.loc[index_1, "threshold_value"]
+ < features.loc[index_2, "threshold_value"]
+ ):
remove_list_distance.append(index_2)
+ elif (
+ features.loc[index_1, "threshold_value"]
+ > features.loc[index_2, "threshold_value"]
+ ):
+ remove_list_distance.append(index_1)
+ elif (
+ features.loc[index_1, "threshold_value"]
+ == features.loc[index_2, "threshold_value"]
+ ):
+ if features.loc[index_1, "num"] > features.loc[index_2, "num"]:
+ remove_list_distance.append(index_2)
+ elif (
+ features.loc[index_1, "num"] < features.loc[index_2, "num"]
+ ):
+ remove_list_distance.append(index_1)
+ elif (
+ features.loc[index_1, "num"] == features.loc[index_2, "num"]
+ ):
+ remove_list_distance.append(index_2)
+
features = features[~features.index.isin(remove_list_distance)]
return features
|
tobac-project/tobac
|
ce390a36e40fe330e59a7c03ad6d02d77e54e812
|
diff --git a/tobac/tests/test_feature_detection.py b/tobac/tests/test_feature_detection.py
index 87b2423..c01c780 100644
--- a/tobac/tests/test_feature_detection.py
+++ b/tobac/tests/test_feature_detection.py
@@ -57,70 +57,6 @@ def test_feature_detection_multithreshold_timestep(
assert fd_output.iloc[0]["hdim_2"] == pytest.approx(test_hdim_2_pt)
[email protected](
- "test_threshs, min_distance, dxy", [([1, 2, 3], 100000, 10000)]
-)
-def test_filter_min_distance(test_threshs, min_distance, dxy):
- """
- Tests ```tobac.feature_detection.filter_min_distance```
- """
- # start by building a simple dataset with two features close to each other
-
- test_dset_size = (50, 50)
- test_hdim_1_pt = 20.0
- test_hdim_2_pt = 20.0
- test_hdim_1_sz = 5
- test_hdim_2_sz = 5
- test_amp = 5
- test_min_num = 2
-
- test_data = np.zeros(test_dset_size)
- test_data = tbtest.make_feature_blob(
- test_data,
- test_hdim_1_pt,
- test_hdim_2_pt,
- h1_size=test_hdim_1_sz,
- h2_size=test_hdim_2_sz,
- amplitude=test_amp,
- )
-
- ## add another blob with smaller value
- test_hdim_1_pt2 = 25.0
- test_hdim_2_pt2 = 25.0
- test_hdim_1_sz2 = 2
- test_hdim_2_sz2 = 2
- test_amp2 = 3
- test_data = tbtest.make_feature_blob(
- test_data,
- test_hdim_1_pt2,
- test_hdim_2_pt2,
- h1_size=test_hdim_1_sz2,
- h2_size=test_hdim_2_sz2,
- amplitude=test_amp2,
- )
- test_data_iris = tbtest.make_dataset_from_arr(test_data, data_type="iris")
-
- # identify these features
- fd_output = feat_detect.feature_detection_multithreshold_timestep(
- test_data_iris,
- 0,
- threshold=test_threshs,
- n_min_threshold=test_min_num,
- min_distance=min_distance,
- dxy=dxy,
- )
-
- # check if it function to filter
- fd_filtered = feat_detect.filter_min_distance(fd_output, dxy, min_distance)
-
- # Make sure we have only one feature (small feature in minimum distance should be removed )
- assert len(fd_output.index) == 2
- assert len(fd_filtered.index) == 1
- # Make sure that the locations of the features is correct (should correspond to locations of first feature)
- assert fd_filtered.iloc[0]["hdim_1"] == pytest.approx(test_hdim_1_pt)
- assert fd_filtered.iloc[0]["hdim_2"] == pytest.approx(test_hdim_2_pt)
-
-
@pytest.mark.parametrize(
"position_threshold", [("center"), ("extreme"), ("weighted_diff"), ("weighted_abs")]
)
@@ -154,45 +90,165 @@ def test_feature_detection_position(position_threshold):
@pytest.mark.parametrize(
"feature_1_loc, feature_2_loc, dxy, dz, min_distance,"
- " add_x_coords, add_y_coords,"
+ "target, add_x_coords, add_y_coords,"
"add_z_coords, expect_feature_1, expect_feature_2",
[
- (
+ ( # If separation greater than min_distance, keep both features
(0, 0, 0, 4, 1),
(1, 1, 1, 4, 1),
1000,
100,
1,
+ "maximum",
False,
False,
False,
True,
True,
),
- (
+ ( # Keep feature 1 by area
(0, 0, 0, 4, 1),
(1, 1, 1, 3, 1),
1000,
100,
5000,
+ "maximum",
False,
False,
False,
True,
False,
),
- (
+ ( # Keep feature 2 by area
+ (0, 0, 0, 4, 1),
+ (1, 1, 1, 6, 1),
+ 1000,
+ 100,
+ 5000,
+ "maximum",
+ False,
+ False,
+ False,
+ False,
+ True,
+ ),
+ ( # Keep feature 1 by area
+ (0, 0, 0, 4, 1),
+ (1, 1, 1, 3, 1),
+ 1000,
+ 100,
+ 5000,
+ "minimum",
+ False,
+ False,
+ False,
+ True,
+ False,
+ ),
+ ( # Keep feature 2 by area
+ (0, 0, 0, 4, 1),
+ (1, 1, 1, 6, 1),
+ 1000,
+ 100,
+ 5000,
+ "minimum",
+ False,
+ False,
+ False,
+ False,
+ True,
+ ),
+ ( # Keep feature 1 by maximum threshold
+ (0, 0, 0, 4, 2),
+ (1, 1, 1, 10, 1),
+ 1000,
+ 100,
+ 5000,
+ "maximum",
+ False,
+ False,
+ False,
+ True,
+ False,
+ ),
+ ( # Keep feature 2 by maximum threshold
+ (0, 0, 0, 4, 2),
+ (1, 1, 1, 10, 3),
+ 1000,
+ 100,
+ 5000,
+ "maximum",
+ False,
+ False,
+ False,
+ False,
+ True,
+ ),
+ ( # Keep feature 1 by minimum threshold
+ (0, 0, 0, 4, -1),
+ (1, 1, 1, 10, 1),
+ 1000,
+ 100,
+ 5000,
+ "minimum",
+ False,
+ False,
+ False,
+ True,
+ False,
+ ),
+ ( # Keep feature 2 by minimum threshold
(0, 0, 0, 4, 2),
(1, 1, 1, 10, 1),
1000,
100,
5000,
+ "minimum",
+ False,
+ False,
+ False,
+ False,
+ True,
+ ),
+ ( # Keep feature 1 by tie-break
+ (0, 0, 0, 4, 2),
+ (1, 1, 1, 4, 2),
+ 1000,
+ 100,
+ 5000,
+ "maximum",
False,
False,
False,
True,
False,
),
+ ( # Keep feature 1 by tie-break
+ (0, 0, 0, 4, 2),
+ (1, 1, 1, 4, 2),
+ 1000,
+ 100,
+ 5000,
+ "minimum",
+ False,
+ False,
+ False,
+ True,
+ False,
+ ),
+ ( # If target is not maximum or minimum raise ValueError
+ (0, 0, 0, 4, 1),
+ (1, 1, 1, 4, 1),
+ 1000,
+ 100,
+ 1,
+ "chaos",
+ False,
+ False,
+ False,
+ False,
+ False,
+ ),
],
)
def test_filter_min_distance(
@@ -201,6 +257,7 @@ def test_filter_min_distance(
dxy,
dz,
min_distance,
+ target,
add_x_coords,
add_y_coords,
add_z_coords,
@@ -223,6 +280,8 @@ def test_filter_min_distance(
Vertical grid spacing (constant)
min_distance: float
Minimum distance between features (m)
+ target: str ["maximum" | "minimum"]
+ Target maxima or minima threshold for selecting which feature to keep
add_x_coords: bool
Whether or not to add x coordinates
add_y_coords: bool
@@ -296,12 +355,17 @@ def test_filter_min_distance(
"dxy": dxy,
"dz": dz,
"min_distance": min_distance,
+ "target": target,
}
+ if target not in ["maximum", "minimum"]:
+ with pytest.raises(ValueError):
+ out_feats = feat_detect.filter_min_distance(**filter_dist_opts)
- out_feats = feat_detect.filter_min_distance(**filter_dist_opts)
+ else:
+ out_feats = feat_detect.filter_min_distance(**filter_dist_opts)
- assert expect_feature_1 == (np.sum(out_feats["feature"] == 1) == 1)
- assert expect_feature_2 == (np.sum(out_feats["feature"] == 2) == 1)
+ assert expect_feature_1 == (np.sum(out_feats["feature"] == 1) == 1)
+ assert expect_feature_2 == (np.sum(out_feats["feature"] == 2) == 1)
@pytest.mark.parametrize(
|
Add "minima" target to `filter_min_distance`
Current behaviour for `filter_min_distance` is to always select the feature with the largest threshold, even if the feature detection is targeting minima. This is an aspect of #230 that requires more urgent attention
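A sketch of the selection rule being asked for, reduced to the threshold comparison (the function name is illustrative; `filter_min_distance` would apply this pairwise to nearby features):
```python
def keep_first(threshold_1, threshold_2, target="maximum"):
    # With target="minimum" the *smaller* threshold value should win.
    if target == "maximum":
        return threshold_1 > threshold_2
    if target == "minimum":
        return threshold_1 < threshold_2
    raise ValueError("target must be 'maximum' or 'minimum'")


assert keep_first(4, 2, target="maximum")
assert keep_first(-1, 1, target="minimum")
```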
|
0.0
|
ce390a36e40fe330e59a7c03ad6d02d77e54e812
|
[
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc0-feature_2_loc0-1000-100-1-maximum-False-False-False-True-True]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc1-feature_2_loc1-1000-100-5000-maximum-False-False-False-True-False]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc2-feature_2_loc2-1000-100-5000-maximum-False-False-False-False-True]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc3-feature_2_loc3-1000-100-5000-minimum-False-False-False-True-False]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc4-feature_2_loc4-1000-100-5000-minimum-False-False-False-False-True]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc5-feature_2_loc5-1000-100-5000-maximum-False-False-False-True-False]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc6-feature_2_loc6-1000-100-5000-maximum-False-False-False-False-True]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc7-feature_2_loc7-1000-100-5000-minimum-False-False-False-True-False]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc8-feature_2_loc8-1000-100-5000-minimum-False-False-False-False-True]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc9-feature_2_loc9-1000-100-5000-maximum-False-False-False-True-False]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc10-feature_2_loc10-1000-100-5000-minimum-False-False-False-True-False]",
"tobac/tests/test_feature_detection.py::test_filter_min_distance[feature_1_loc11-feature_2_loc11-1000-100-1-chaos-False-False-False-False-False]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-10 17:11:45+00:00
|
bsd-3-clause
| 5,929 |
|
tobac-project__tobac-272
|
diff --git a/doc/feature_detection/notebooks/feature_detection_filtering.ipynb b/doc/feature_detection/notebooks/feature_detection_filtering.ipynb
index fc5fed9..4f3d5e1 100644
--- a/doc/feature_detection/notebooks/feature_detection_filtering.ipynb
+++ b/doc/feature_detection/notebooks/feature_detection_filtering.ipynb
@@ -199,7 +199,7 @@
" # This is the parameter for erosion that gets passed to the scikit-image library. \n",
" footprint = np.ones((erosion, erosion))\n",
" # This is what tobac sees after erosion. \n",
- " filtered_mask = skimage.morphology.binary_erosion(tobac_mask, selem).astype(np.int64)\n",
+ " filtered_mask = skimage.morphology.binary_erosion(tobac_mask, footprint).astype(np.int64)\n",
" else:\n",
" filtered_mask = tobac_mask\n",
"\n",
@@ -219,9 +219,9 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Python [conda env:tobac_stable]",
+ "display_name": "Python 3 (ipykernel)",
"language": "python",
- "name": "conda-env-tobac_stable-py"
+ "name": "python3"
},
"language_info": {
"codemirror_mode": {
@@ -233,9 +233,8 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.5"
+ "version": "3.11.3"
},
- "orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "25a19fbe0a9132dfb9279d48d161753c6352f8f9478c2e74383d340069b907c3"
diff --git a/tobac/tracking.py b/tobac/tracking.py
index 7f567c4..0b7e2c1 100644
--- a/tobac/tracking.py
+++ b/tobac/tracking.py
@@ -440,7 +440,9 @@ def linking_trackpy(
# add time coordinate relative to cell initiation:
# logging.debug('start adding cell time to trajectories')
trajectories_filtered_filled = trajectories_filtered_unfilled
- trajectories_final = add_cell_time(trajectories_filtered_filled)
+ trajectories_final = add_cell_time(
+ trajectories_filtered_filled, cell_number_unassigned=cell_number_unassigned
+ )
# Add metadata
trajectories_final.attrs["cell_number_unassigned"] = cell_number_unassigned
@@ -532,13 +534,15 @@ def fill_gaps(
return t_out
-def add_cell_time(t):
+def add_cell_time(t: pd.DataFrame, cell_number_unassigned: int):
"""add cell time as time since the initiation of each cell
Parameters
----------
t : pandas.DataFrame
trajectories with added coordinates
+ cell_number_unassigned: int
+ unassigned cell value
Returns
-------
@@ -551,6 +555,7 @@ def add_cell_time(t):
t["time_cell"] = t["time"] - t.groupby("cell")["time"].transform("min")
t["time_cell"] = pd.to_timedelta(t["time_cell"])
+ t.loc[t["cell"] == cell_number_unassigned, "time_cell"] = pd.Timedelta("nat")
return t
|
tobac-project/tobac
|
434bcdba03f47d79dceef17f00caf48060552b83
|
diff --git a/tobac/tests/test_tracking.py b/tobac/tests/test_tracking.py
index 1a60e2e..15a400b 100644
--- a/tobac/tests/test_tracking.py
+++ b/tobac/tests/test_tracking.py
@@ -362,3 +362,40 @@ def test_argument_logic():
output = tobac.linking_trackpy(
cell_1, None, 1, 1, d_min=None, d_max=None, v_max=None
)
+
+
+def test_untracked_nat():
+ """
+ Tests to make sure that the untracked cells don't have timedelta assigned.
+ """
+ features = tobac.testing.generate_single_feature(
+ 1,
+ 1,
+ min_h1=0,
+ max_h1=101,
+ min_h2=0,
+ max_h2=101,
+ frame_start=0,
+ num_frames=2,
+ spd_h1=50,
+ spd_h2=50,
+ )
+
+ output = tobac.linking_trackpy(
+ features,
+ None,
+ 1,
+ 1,
+ d_max=40,
+ method_linking="random",
+ cell_number_unassigned=-1,
+ time_cell_min=2,
+ )
+
+ assert np.all(output["cell"].values == np.array([-1, -1]))
+ # NaT values cannot be compared, so instead we check for null values
+ # and check for the data type
+ assert np.all(pd.isnull(output["time_cell"]))
+ # the exact data type depends on architecture, so
+ # instead just check by name
+ assert output["time_cell"].dtype.name == "timedelta64[ns]"
|
Untracked cells shouldn't have `cell_time`
Currently, we give untracked cells (which are, by default, labeled `-1`) a `time_cell` value. In at least `RC_v1.5.0`, these values end up being equal to the length of the total track. We should instead make these `NaT` or similar.
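A minimal pandas sketch of the intended behaviour (the column names mirror tobac's `cell` and `time_cell`, and the `-1` sentinel is the default mentioned above; this is an illustration, not tobac's code):
```py
import pandas as pd

# Sketch only: untracked rows carry the sentinel cell value -1 and should
# end up with NaT in time_cell instead of a computed offset.
tracks = pd.DataFrame({
    "cell": [1, 1, -1],
    "time_cell": pd.to_timedelta(["0s", "300s", "600s"]),
})

cell_number_unassigned = -1  # tobac's default sentinel per the issue
tracks.loc[tracks["cell"] == cell_number_unassigned, "time_cell"] = pd.Timedelta("nat")

print(tracks)  # the untracked row now shows NaT, and the column stays timedelta64[ns]
```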
|
0.0
|
434bcdba03f47d79dceef17f00caf48060552b83
|
[
"tobac/tests/test_tracking.py::test_untracked_nat"
] |
[
"tobac/tests/test_tracking.py::test_linking_trackpy",
"tobac/tests/test_tracking.py::test_3D_tracking_min_dist_z[point_init0-speed0-1000-100-200-True-True]",
"tobac/tests/test_tracking.py::test_3D_tracking_min_dist_z[point_init1-speed1-1000-100-200-False-True]",
"tobac/tests/test_tracking.py::test_3D_tracking_min_dist_z[point_init2-speed2-1000-100-200-True-False]",
"tobac/tests/test_tracking.py::test_3D_tracking_min_dist_z[point_init3-speed3-1000-100-200-False-False]",
"tobac/tests/test_tracking.py::test_tracking_extrapolation",
"tobac/tests/test_tracking.py::test_argument_logic"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-02 15:02:08+00:00
|
bsd-3-clause
| 5,930 |
|
tobgu__pyrsistent-281
|
diff --git a/pyrsistent/_helpers.py b/pyrsistent/_helpers.py
index 1320e65..b44bfc5 100644
--- a/pyrsistent/_helpers.py
+++ b/pyrsistent/_helpers.py
@@ -1,3 +1,4 @@
+import collections
from functools import wraps
from pyrsistent._pmap import PMap, pmap
from pyrsistent._pset import PSet, pset
@@ -10,6 +11,7 @@ def freeze(o, strict=True):
- list is converted to pvector, recursively
- dict is converted to pmap, recursively on values (but not keys)
+ - defaultdict is converted to pmap, recursively on values (but not keys)
- set is converted to pset, but not recursively
- tuple is converted to tuple, recursively.
@@ -33,6 +35,8 @@ def freeze(o, strict=True):
typ = type(o)
if typ is dict or (strict and isinstance(o, PMap)):
return pmap({k: freeze(v, strict) for k, v in o.items()})
+ if typ is collections.defaultdict or (strict and isinstance(o, PMap)):
+ return pmap({k: freeze(v, strict) for k, v in o.items()})
if typ is list or (strict and isinstance(o, PVector)):
curried_freeze = lambda x: freeze(x, strict)
return pvector(map(curried_freeze, o))
|
tobgu/pyrsistent
|
b091106b970fc45e0e0bee7fdb9484a80f3510ec
|
diff --git a/tests/freeze_test.py b/tests/freeze_test.py
index 6e0e2eb..158cf5d 100644
--- a/tests/freeze_test.py
+++ b/tests/freeze_test.py
@@ -1,5 +1,5 @@
"""Tests for freeze and thaw."""
-
+import collections
from pyrsistent import v, m, s, freeze, thaw, PRecord, field, mutant
@@ -17,6 +17,13 @@ def test_freeze_dict():
assert result == m(a='b')
assert type(freeze({'a': 'b'})) is type(m())
+def test_freeze_defaultdict():
+ test_dict = collections.defaultdict(dict)
+ test_dict['a'] = 'b'
+ result = freeze(test_dict)
+ assert result == m(a='b')
+ assert type(freeze({'a': 'b'})) is type(m())
+
def test_freeze_set():
result = freeze(set([1, 2, 3]))
assert result == s(1, 2, 3)
@@ -27,6 +34,13 @@ def test_freeze_recurse_in_dictionary_values():
assert result == m(a=v(1))
assert type(result['a']) is type(v())
+def test_freeze_recurse_in_defaultdict_values():
+ test_dict = collections.defaultdict(dict)
+ test_dict['a'] = [1]
+ result = freeze(test_dict)
+ assert result == m(a=v(1))
+ assert type(result['a']) is type(v())
+
def test_freeze_recurse_in_pmap_values():
input = {'a': m(b={'c': 1})}
result = freeze(input)
|
freeze() does not work on collections.defaultdict
The following code
```py
import collections
from pyrsistent import freeze
mydict = collections.defaultdict(dict)
mydict["a"]["x"] = 2
mydict["a"]["y"] = 3
mydict["b"]["z"] = 4
frozen = freeze(mydict)
print(type(frozen))
```
prints `<class 'collections.defaultdict'>` rather than `PMap` as I would expect. In other words `freeze` does not do anything to a `collections.defaultdict` when I would expect it to make it a persistent map instead.
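A hedged sketch of a pre-fix workaround plus the gist of the merged change (converting the `defaultdict` to a plain `dict` first is my own suggestion, not part of the patch):
```py
import collections
from pyrsistent import freeze, PMap

mydict = collections.defaultdict(dict)
mydict["a"]["x"] = 2

# Workaround before the fix: hand freeze() a plain dict so its type check matches.
frozen = freeze(dict(mydict))
assert isinstance(frozen, PMap)

# Gist of the fix itself: freeze() now also matches collections.defaultdict
# explicitly, converting it to a pmap and recursing on the values.
```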
|
0.0
|
b091106b970fc45e0e0bee7fdb9484a80f3510ec
|
[
"tests/freeze_test.py::test_freeze_recurse_in_defaultdict_values"
] |
[
"tests/freeze_test.py::test_freeze_basic",
"tests/freeze_test.py::test_freeze_list",
"tests/freeze_test.py::test_freeze_dict",
"tests/freeze_test.py::test_freeze_defaultdict",
"tests/freeze_test.py::test_freeze_set",
"tests/freeze_test.py::test_freeze_recurse_in_dictionary_values",
"tests/freeze_test.py::test_freeze_recurse_in_pmap_values",
"tests/freeze_test.py::test_freeze_recurse_in_lists",
"tests/freeze_test.py::test_freeze_recurse_in_pvectors",
"tests/freeze_test.py::test_freeze_recurse_in_tuples",
"tests/freeze_test.py::test_freeze_nonstrict_no_recurse_in_pmap_values",
"tests/freeze_test.py::test_freeze_nonstrict_no_recurse_in_pvectors",
"tests/freeze_test.py::test_thaw_basic",
"tests/freeze_test.py::test_thaw_list",
"tests/freeze_test.py::test_thaw_dict",
"tests/freeze_test.py::test_thaw_set",
"tests/freeze_test.py::test_thaw_recurse_in_mapping_values",
"tests/freeze_test.py::test_thaw_recurse_in_dict_values",
"tests/freeze_test.py::test_thaw_recurse_in_vectors",
"tests/freeze_test.py::test_thaw_recurse_in_lists",
"tests/freeze_test.py::test_thaw_recurse_in_tuples",
"tests/freeze_test.py::test_thaw_can_handle_subclasses_of_persistent_base_types",
"tests/freeze_test.py::test_thaw_non_strict_no_recurse_in_dict_values",
"tests/freeze_test.py::test_thaw_non_strict_no_recurse_in_lists",
"tests/freeze_test.py::test_mutant_decorator"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-10-14 06:38:53+00:00
|
mit
| 5,931 |
|
tobi-wan-kenobi__bumblebee-status-714
|
diff --git a/bumblebee_status/modules/contrib/arch_update.py b/bumblebee_status/modules/contrib/arch_update.py
new file mode 120000
index 0000000..57fd99f
--- /dev/null
+++ b/bumblebee_status/modules/contrib/arch_update.py
@@ -0,0 +1,1 @@
+arch-update.py
\ No newline at end of file
diff --git a/bumblebee_status/modules/contrib/battery_upower.py b/bumblebee_status/modules/contrib/battery_upower.py
new file mode 120000
index 0000000..4a7bb68
--- /dev/null
+++ b/bumblebee_status/modules/contrib/battery_upower.py
@@ -0,0 +1,1 @@
+battery-upower.py
\ No newline at end of file
diff --git a/bumblebee_status/modules/contrib/layout_xkbswitch.py b/bumblebee_status/modules/contrib/layout_xkbswitch.py
new file mode 120000
index 0000000..e7d6b94
--- /dev/null
+++ b/bumblebee_status/modules/contrib/layout_xkbswitch.py
@@ -0,0 +1,1 @@
+layout-xkbswitch.py
\ No newline at end of file
diff --git a/bumblebee_status/modules/core/layout_xkb.py b/bumblebee_status/modules/core/layout_xkb.py
new file mode 120000
index 0000000..f2e8037
--- /dev/null
+++ b/bumblebee_status/modules/core/layout_xkb.py
@@ -0,0 +1,1 @@
+layout-xkb.py
\ No newline at end of file
diff --git a/docs/development/module.rst b/docs/development/module.rst
index 1d6e716..113a6f7 100644
--- a/docs/development/module.rst
+++ b/docs/development/module.rst
@@ -11,6 +11,7 @@ Adding a new module to ``bumblebee-status`` is straight-forward:
``bumblebee-status`` (i.e. a module called
``bumblebee_status/modules/contrib/test.py`` will be loaded using
``bumblebee-status -m test``)
+- The module name must follow the `Python Naming Conventions <https://www.python.org/dev/peps/pep-0008/#package-and-module-names>`_
- See below for how to actually write the module
- Test (run ``bumblebee-status`` in the CLI)
- Make sure your changes don’t break anything: ``./coverage.sh``
|
tobi-wan-kenobi/bumblebee-status
|
96f8e92822f8b72287ef97bbdd9a0c9bf1a063da
|
diff --git a/tests/modules/contrib/test_arch-update.py b/tests/modules/contrib/test_arch-update.py
index 6a1c172..b11187b 100644
--- a/tests/modules/contrib/test_arch-update.py
+++ b/tests/modules/contrib/test_arch-update.py
@@ -3,3 +3,5 @@ import pytest
def test_load_module():
__import__("modules.contrib.arch-update")
+def test_load_symbolic_link_module():
+ __import__("modules.contrib.arch_update")
diff --git a/tests/modules/contrib/test_battery-upower.py b/tests/modules/contrib/test_battery-upower.py
index cb62a16..d129679 100644
--- a/tests/modules/contrib/test_battery-upower.py
+++ b/tests/modules/contrib/test_battery-upower.py
@@ -5,3 +5,6 @@ pytest.importorskip("dbus")
def test_load_module():
__import__("modules.contrib.battery-upower")
+def test_load_symbolic_link_module():
+ __import__("modules.contrib.battery_upower")
+
diff --git a/tests/modules/contrib/test_layout-xkbswitch.py b/tests/modules/contrib/test_layout-xkbswitch.py
index 08cfd96..b709254 100644
--- a/tests/modules/contrib/test_layout-xkbswitch.py
+++ b/tests/modules/contrib/test_layout-xkbswitch.py
@@ -3,3 +3,5 @@ import pytest
def test_load_module():
__import__("modules.contrib.layout-xkbswitch")
+def test_load_symbolic_link_module():
+ __import__("modules.contrib.layout_xkbswitch")
diff --git a/tests/modules/core/test_layout-xkb.py b/tests/modules/core/test_layout-xkb.py
index 8eacfad..852b9da 100644
--- a/tests/modules/core/test_layout-xkb.py
+++ b/tests/modules/core/test_layout-xkb.py
@@ -5,3 +5,5 @@ pytest.importorskip("xkbgroup")
def test_load_module():
__import__("modules.core.layout-xkb")
+def test_load_symbolic_link_module():
+ __import__("modules.core.layout_xkb")
|
Modules names with hyphens (-)
Hey, ya.
Should we rename the modules below to match the [Python Naming Conventions](https://www.python.org/dev/peps/pep-0008/#package-and-module-names)?
```sh
modules/core/layout-xkb.py
modules/contrib/layout-xkbswitch.py
modules/contrib/arch-update.py
modules/contrib/battery-upower.py
```
These modules work as expected (I think) in the status bar, but I can't import them into a test file using `import`.
```sh
E import modules.contrib.arch-update
E ^
E SyntaxError: invalid syntax
```
Any ideas?
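A small sketch of why only the literal `import` statement fails (the module path is hypothetical and assumes the repo's `modules/` package is on `sys.path`):
```py
import importlib

# "import modules.contrib.arch-update" is a SyntaxError: the hyphen is
# parsed as a minus sign, so it can never appear in an import statement.
# String-based imports skip the parser entirely, which is why the module
# still loads fine at runtime:
mod = importlib.import_module("modules.contrib.arch-update")

# The merged fix adds underscore-named symlinks (arch_update.py ->
# arch-update.py) so the conventional statement works as well.
```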
|
0.0
|
96f8e92822f8b72287ef97bbdd9a0c9bf1a063da
|
[
"tests/modules/contrib/test_arch-update.py::test_load_symbolic_link_module",
"tests/modules/contrib/test_layout-xkbswitch.py::test_load_symbolic_link_module"
] |
[
"tests/modules/contrib/test_arch-update.py::test_load_module",
"tests/modules/contrib/test_layout-xkbswitch.py::test_load_module"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-01 22:19:14+00:00
|
mit
| 5,932 |
|
tobymao__sqlglot-1001
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index dff01ca4..e44be570 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -3076,7 +3076,7 @@ class Parser(metaclass=_Parser):
def _parse_merge(self) -> exp.Expression:
self._match(TokenType.INTO)
- target = self._parse_table(schema=True)
+ target = self._parse_table()
self._match(TokenType.USING)
using = self._parse_table()
|
tobymao/sqlglot
|
4da01a08ac164e0ffeca1bd29d1e10c4297f2708
|
diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index b2f46766..b23711bc 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -1365,3 +1365,19 @@ SELECT
"spark": "MERGE INTO target USING source ON target.id = source.id WHEN MATCHED THEN UPDATE * WHEN NOT MATCHED THEN INSERT *",
},
)
+ self.validate_all(
+ """
+ MERGE a b USING c d ON b.id = d.id
+ WHEN MATCHED AND EXISTS (
+ SELECT b.name
+ EXCEPT
+ SELECT d.name
+ )
+ THEN UPDATE SET b.name = d.name
+ """,
+ write={
+ "bigquery": "MERGE INTO a AS b USING c AS d ON b.id = d.id WHEN MATCHED AND EXISTS(SELECT b.name EXCEPT DISTINCT SELECT d.name) THEN UPDATE SET b.name = d.name",
+ "snowflake": "MERGE INTO a AS b USING c AS d ON b.id = d.id WHEN MATCHED AND EXISTS(SELECT b.name EXCEPT SELECT d.name) THEN UPDATE SET b.name = d.name",
+ "spark": "MERGE INTO a AS b USING c AS d ON b.id = d.id WHEN MATCHED AND EXISTS(SELECT b.name EXCEPT SELECT d.name) THEN UPDATE SET b.name = d.name",
+ },
+ )
|
Support for Merge
Hi there!
I have a t-sql merge statement:
```sql
MERGE [CORE].[DIM_BusinessUnit] a
USING [TRANSF].[Merge_BusinessUnit] b
ON a.[IdDWH] = b.[IdDWH]
WHEN MATCHED AND EXISTS
(
SELECT a.[Name]
EXCEPT
SELECT b.[Name]
)
THEN
UPDATE SET a.[Name] = b.[Name]
;
```
seems to be unparseable.
Enough issues for a while now ;) sqlglot is great and is already helping me a lot.
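A quick sketch against sqlglot's public API showing the round trip the fix enables (the simplified statement below is my own, not the reporter's):
```py
import sqlglot

sql = (
    "MERGE a b USING c d ON b.id = d.id "
    "WHEN MATCHED THEN UPDATE SET b.name = d.name"
)

# Before the fix, parsing the aliased MERGE target ("a b") failed because
# the parser expected a column schema after the target table name.
expr = sqlglot.parse_one(sql)
print(expr.sql(dialect="spark"))
```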
|
0.0
|
4da01a08ac164e0ffeca1bd29d1e10c4297f2708
|
[
"tests/dialects/test_dialect.py::TestDialect::test_merge"
] |
[
"tests/dialects/test_dialect.py::TestDialect::test_alias",
"tests/dialects/test_dialect.py::TestDialect::test_array",
"tests/dialects/test_dialect.py::TestDialect::test_cast",
"tests/dialects/test_dialect.py::TestDialect::test_cross_join",
"tests/dialects/test_dialect.py::TestDialect::test_enum",
"tests/dialects/test_dialect.py::TestDialect::test_hash_comments",
"tests/dialects/test_dialect.py::TestDialect::test_json",
"tests/dialects/test_dialect.py::TestDialect::test_lateral_subquery",
"tests/dialects/test_dialect.py::TestDialect::test_limit",
"tests/dialects/test_dialect.py::TestDialect::test_nullsafe_eq",
"tests/dialects/test_dialect.py::TestDialect::test_nullsafe_neq",
"tests/dialects/test_dialect.py::TestDialect::test_operators",
"tests/dialects/test_dialect.py::TestDialect::test_order_by",
"tests/dialects/test_dialect.py::TestDialect::test_set_operators",
"tests/dialects/test_dialect.py::TestDialect::test_time",
"tests/dialects/test_dialect.py::TestDialect::test_transactions"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-01-23 14:54:01+00:00
|
mit
| 5,933 |
|
tobymao__sqlglot-1087
|
diff --git a/sqlglot/dataframe/sql/functions.py b/sqlglot/dataframe/sql/functions.py
index a141fe46..01492893 100644
--- a/sqlglot/dataframe/sql/functions.py
+++ b/sqlglot/dataframe/sql/functions.py
@@ -1144,10 +1144,16 @@ def aggregate(
merge_exp = _get_lambda_from_func(merge)
if finish is not None:
finish_exp = _get_lambda_from_func(finish)
- return Column.invoke_anonymous_function(
- col, "AGGREGATE", initialValue, Column(merge_exp), Column(finish_exp)
+ return Column.invoke_expression_over_column(
+ col,
+ glotexp.Reduce,
+ initial=initialValue,
+ merge=Column(merge_exp),
+ finish=Column(finish_exp),
)
- return Column.invoke_anonymous_function(col, "AGGREGATE", initialValue, Column(merge_exp))
+ return Column.invoke_expression_over_column(
+ col, glotexp.Reduce, initial=initialValue, merge=Column(merge_exp)
+ )
def transform(
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index ff654f8c..53798b35 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -148,6 +148,22 @@ def _serial_to_generated(expression):
return expression
+def _generate_series(args):
+ # The goal is to convert step values like '1 day' or INTERVAL '1 day' into INTERVAL '1' day
+ step = seq_get(args, 2)
+
+ if step is None:
+ # Postgres allows calls with just two arguments -- the "step" argument defaults to 1
+ return exp.GenerateSeries.from_arg_list(args)
+
+ if step.is_string:
+ args[2] = exp.to_interval(step.this)
+ elif isinstance(step, exp.Interval) and not step.args.get("unit"):
+ args[2] = exp.to_interval(step.this.this)
+
+ return exp.GenerateSeries.from_arg_list(args)
+
+
def _to_timestamp(args):
# TO_TIMESTAMP accepts either a single double argument or (text, text)
if len(args) == 1:
@@ -260,6 +276,7 @@ class Postgres(Dialect):
"NOW": exp.CurrentTimestamp.from_arg_list,
"TO_TIMESTAMP": _to_timestamp,
"TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
+ "GENERATE_SERIES": _generate_series,
}
BITWISE = {
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 8bade68c..1038e66a 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -105,6 +105,29 @@ def _ts_or_ds_add_sql(self, expression):
return f"DATE_ADD({unit}, {e}, DATE_PARSE(SUBSTR({this}, 1, 10), {Presto.date_format}))"
+def _sequence_sql(self, expression):
+ start = expression.args["start"]
+ end = expression.args["end"]
+ step = expression.args.get("step", 1) # Postgres defaults to 1 for generate_series
+
+ target_type = None
+
+ if isinstance(start, exp.Cast):
+ target_type = start.to
+ elif isinstance(end, exp.Cast):
+ target_type = end.to
+
+ if target_type and target_type.this == exp.DataType.Type.TIMESTAMP:
+ to = target_type.copy()
+
+ if target_type is start.to:
+ end = exp.Cast(this=end, to=to)
+ else:
+ start = exp.Cast(this=start, to=to)
+
+ return f"SEQUENCE({self.format_args(start, end, step)})"
+
+
def _ensure_utf8(charset):
if charset.name.lower() != "utf-8":
raise UnsupportedError(f"Unsupported charset {charset}")
@@ -235,7 +258,7 @@ class Presto(Dialect):
exp.Decode: _decode_sql,
exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.dateint_format}) AS DATE)",
exp.Encode: _encode_sql,
- exp.GenerateSeries: rename_func("SEQUENCE"),
+ exp.GenerateSeries: _sequence_sql,
exp.Hex: rename_func("TO_HEX"),
exp.If: if_sql,
exp.ILike: no_ilike_sql,
diff --git a/sqlglot/dialects/spark.py b/sqlglot/dialects/spark.py
index f20c4dc1..08ff775d 100644
--- a/sqlglot/dialects/spark.py
+++ b/sqlglot/dialects/spark.py
@@ -73,6 +73,7 @@ class Spark(Hive):
),
"APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
"IIF": exp.If.from_arg_list,
+ "AGGREGATE": exp.Reduce.from_arg_list,
}
FUNCTION_PARSERS = {
diff --git a/sqlglot/executor/env.py b/sqlglot/executor/env.py
index 04dc9381..ba9cbbdf 100644
--- a/sqlglot/executor/env.py
+++ b/sqlglot/executor/env.py
@@ -171,5 +171,6 @@ ENV = {
"STRPOSITION": str_position,
"SUB": null_if_any(lambda e, this: e - this),
"SUBSTRING": substring,
+ "TIMESTRTOTIME": null_if_any(lambda arg: datetime.datetime.fromisoformat(arg)),
"UPPER": null_if_any(lambda arg: arg.upper()),
}
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 135c49f2..acc99cb3 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -3248,7 +3248,7 @@ class ReadCSV(Func):
class Reduce(Func):
- arg_types = {"this": True, "initial": True, "merge": True, "finish": True}
+ arg_types = {"this": True, "initial": True, "merge": True, "finish": False}
class RegexpLike(Func):
@@ -3968,6 +3968,28 @@ def to_identifier(alias, quoted=None) -> t.Optional[Identifier]:
return identifier
+INTERVAL_STRING_RE = re.compile(r"\s*([0-9]+)\s*([a-zA-Z]+)\s*")
+
+
+def to_interval(interval: str | Literal) -> Interval:
+ """Builds an interval expression from a string like '1 day' or '5 months'."""
+ if isinstance(interval, Literal):
+ if not interval.is_string:
+ raise ValueError("Invalid interval string.")
+
+ interval = interval.this
+
+ interval_parts = INTERVAL_STRING_RE.match(interval) # type: ignore
+
+ if not interval_parts:
+ raise ValueError("Invalid interval string.")
+
+ return Interval(
+ this=Literal.string(interval_parts.group(1)),
+ unit=Var(this=interval_parts.group(2)),
+ )
+
+
@t.overload
def to_table(sql_path: str | Table, **kwargs) -> Table:
...
|
tobymao/sqlglot
|
6d95fc553510bbadb6e0089e0c6063582c2e6a69
|
diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index 446247c3..4267a4dc 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -715,6 +715,7 @@ class TestDialect(Validator):
"hive": "REDUCE(x, 0, (acc, x) -> acc + x, acc -> acc)",
"presto": "REDUCE(x, 0, (acc, x) -> acc + x, acc -> acc)",
"spark": "AGGREGATE(x, 0, (acc, x) -> acc + x, acc -> acc)",
+ "presto": "REDUCE(x, 0, (acc, x) -> acc + x, acc -> acc)",
},
)
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 70a6820b..780cac0c 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -113,11 +113,19 @@ class TestPostgres(Validator):
self.validate_identity("x ~* 'y'")
self.validate_all(
- "GENERATE_SERIES(a, b, c)",
+ "GENERATE_SERIES(a, b, ' 2 days ')",
write={
- "postgres": "GENERATE_SERIES(a, b, c)",
- "presto": "SEQUENCE(a, b, c)",
- "trino": "SEQUENCE(a, b, c)",
+ "postgres": "GENERATE_SERIES(a, b, INTERVAL '2' days)",
+ "presto": "SEQUENCE(a, b, INTERVAL '2' days)",
+ "trino": "SEQUENCE(a, b, INTERVAL '2' days)",
+ },
+ )
+ self.validate_all(
+ "GENERATE_SERIES('2019-01-01'::TIMESTAMP, NOW(), '1day')",
+ write={
+ "postgres": "GENERATE_SERIES(CAST('2019-01-01' AS TIMESTAMP), CURRENT_TIMESTAMP, INTERVAL '1' day)",
+ "presto": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' day)",
+ "trino": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' day)",
},
)
self.validate_all(
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index b5350481..9c8e26e7 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -314,6 +314,11 @@ class TestPresto(Validator):
def test_presto(self):
self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)")
+ self.validate_identity("SELECT * FROM (VALUES (1))")
+ self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
+ self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
+ self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
+
self.validate_all(
'SELECT a."b" FROM "foo"',
write={
@@ -455,10 +460,6 @@ class TestPresto(Validator):
"spark": UnsupportedError,
},
)
- self.validate_identity("SELECT * FROM (VALUES (1))")
- self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
- self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
- self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
def test_encode_decode(self):
self.validate_all(
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 02d43aaa..be74a27f 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -212,6 +212,17 @@ TBLPROPERTIES (
self.validate_identity("TRIM(BOTH 'SL' FROM 'SSparkSQLS')")
self.validate_identity("TRIM(LEADING 'SL' FROM 'SSparkSQLS')")
self.validate_identity("TRIM(TRAILING 'SL' FROM 'SSparkSQLS')")
+
+ self.validate_all(
+ "AGGREGATE(my_arr, 0, (acc, x) -> acc + x, s -> s * 2)",
+ write={
+ "trino": "REDUCE(my_arr, 0, (acc, x) -> acc + x, s -> s * 2)",
+ "duckdb": "REDUCE(my_arr, 0, (acc, x) -> acc + x, s -> s * 2)",
+ "hive": "REDUCE(my_arr, 0, (acc, x) -> acc + x, s -> s * 2)",
+ "presto": "REDUCE(my_arr, 0, (acc, x) -> acc + x, s -> s * 2)",
+ "spark": "AGGREGATE(my_arr, 0, (acc, x) -> acc + x, s -> s * 2)",
+ },
+ )
self.validate_all(
"TRIM('SL', 'SSparkSQLS')", write={"spark": "TRIM('SL' FROM 'SSparkSQLS')"}
)
diff --git a/tests/test_expressions.py b/tests/test_expressions.py
index f0639ed2..55e07d13 100644
--- a/tests/test_expressions.py
+++ b/tests/test_expressions.py
@@ -631,6 +631,19 @@ FROM foo""",
FROM foo""",
)
+ def test_to_interval(self):
+ self.assertEqual(exp.to_interval("1day").sql(), "INTERVAL '1' day")
+ self.assertEqual(exp.to_interval(" 5 months").sql(), "INTERVAL '5' months")
+ with self.assertRaises(ValueError):
+ exp.to_interval("bla")
+
+ self.assertEqual(exp.to_interval(exp.Literal.string("1day")).sql(), "INTERVAL '1' day")
+ self.assertEqual(
+ exp.to_interval(exp.Literal.string(" 5 months")).sql(), "INTERVAL '5' months"
+ )
+ with self.assertRaises(ValueError):
+ exp.to_interval(exp.Literal.string("bla"))
+
def test_to_table(self):
table_only = exp.to_table("table_name")
self.assertEqual(table_only.name, "table_name")
|
generate_series() is not transpiled to sequence() when going from postgres to trino sql.
Something like this in postgres
`generate_series('2019-01-01'::timestamp, now(), '1day') as day`
should be the following in trino
`sequence(cast('2019-01-01' as timestamp), cast(now() as timestamp), '1' day) as day`
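A hedged sketch of the expected call once the transpilation is supported (the output shape follows the tests in the patch; not verified here):
```py
import sqlglot

pg = "SELECT GENERATE_SERIES('2019-01-01'::TIMESTAMP, NOW(), '1day') AS day"

# The fix normalises the string step '1day' to INTERVAL '1' day and casts
# the bare NOW() so Trino's SEQUENCE() sees matching timestamp types.
print(sqlglot.transpile(pg, read="postgres", write="trino")[0])
```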
|
0.0
|
6d95fc553510bbadb6e0089e0c6063582c2e6a69
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_spark.py::TestSpark::test_spark",
"tests/test_expressions.py::TestExpressions::test_to_interval"
] |
[
"tests/dialects/test_dialect.py::TestDialect::test_alias",
"tests/dialects/test_dialect.py::TestDialect::test_array",
"tests/dialects/test_dialect.py::TestDialect::test_cast",
"tests/dialects/test_dialect.py::TestDialect::test_cross_join",
"tests/dialects/test_dialect.py::TestDialect::test_enum",
"tests/dialects/test_dialect.py::TestDialect::test_get_or_raise",
"tests/dialects/test_dialect.py::TestDialect::test_hash_comments",
"tests/dialects/test_dialect.py::TestDialect::test_json",
"tests/dialects/test_dialect.py::TestDialect::test_lateral_subquery",
"tests/dialects/test_dialect.py::TestDialect::test_limit",
"tests/dialects/test_dialect.py::TestDialect::test_merge",
"tests/dialects/test_dialect.py::TestDialect::test_nullsafe_eq",
"tests/dialects/test_dialect.py::TestDialect::test_nullsafe_neq",
"tests/dialects/test_dialect.py::TestDialect::test_operators",
"tests/dialects/test_dialect.py::TestDialect::test_order_by",
"tests/dialects/test_dialect.py::TestDialect::test_set_operators",
"tests/dialects/test_dialect.py::TestDialect::test_time",
"tests/dialects/test_dialect.py::TestDialect::test_transactions",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_presto.py::TestPresto::test_presto",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_presto.py::TestPresto::test_unnest",
"tests/dialects/test_spark.py::TestSpark::test_bool_or",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_iif",
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/test_expressions.py::TestExpressions::test_alias",
"tests/test_expressions.py::TestExpressions::test_alias_column_names",
"tests/test_expressions.py::TestExpressions::test_alias_or_name",
"tests/test_expressions.py::TestExpressions::test_arg_key",
"tests/test_expressions.py::TestExpressions::test_column",
"tests/test_expressions.py::TestExpressions::test_comment_alias",
"tests/test_expressions.py::TestExpressions::test_convert",
"tests/test_expressions.py::TestExpressions::test_ctes",
"tests/test_expressions.py::TestExpressions::test_data_type_builder",
"tests/test_expressions.py::TestExpressions::test_depth",
"tests/test_expressions.py::TestExpressions::test_eq",
"tests/test_expressions.py::TestExpressions::test_find",
"tests/test_expressions.py::TestExpressions::test_find_all",
"tests/test_expressions.py::TestExpressions::test_find_ancestor",
"tests/test_expressions.py::TestExpressions::test_function_building",
"tests/test_expressions.py::TestExpressions::test_function_normalizer",
"tests/test_expressions.py::TestExpressions::test_functions",
"tests/test_expressions.py::TestExpressions::test_hash",
"tests/test_expressions.py::TestExpressions::test_identifier",
"tests/test_expressions.py::TestExpressions::test_named_selects",
"tests/test_expressions.py::TestExpressions::test_pop",
"tests/test_expressions.py::TestExpressions::test_properties_from_dict",
"tests/test_expressions.py::TestExpressions::test_rename_table",
"tests/test_expressions.py::TestExpressions::test_replace",
"tests/test_expressions.py::TestExpressions::test_replace_placeholders",
"tests/test_expressions.py::TestExpressions::test_replace_tables",
"tests/test_expressions.py::TestExpressions::test_selects",
"tests/test_expressions.py::TestExpressions::test_sql",
"tests/test_expressions.py::TestExpressions::test_table",
"tests/test_expressions.py::TestExpressions::test_table_name",
"tests/test_expressions.py::TestExpressions::test_text",
"tests/test_expressions.py::TestExpressions::test_to_column",
"tests/test_expressions.py::TestExpressions::test_to_table",
"tests/test_expressions.py::TestExpressions::test_transform_multiple_children",
"tests/test_expressions.py::TestExpressions::test_transform_no_infinite_recursion",
"tests/test_expressions.py::TestExpressions::test_transform_node_removal",
"tests/test_expressions.py::TestExpressions::test_transform_simple",
"tests/test_expressions.py::TestExpressions::test_transform_with_arguments",
"tests/test_expressions.py::TestExpressions::test_union",
"tests/test_expressions.py::TestExpressions::test_unit",
"tests/test_expressions.py::TestExpressions::test_values",
"tests/test_expressions.py::TestExpressions::test_walk"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-03 23:06:59+00:00
|
mit
| 5,934 |