| Column | Type |
| --- | --- |
| instance_id | string (length 10-57) |
| patch | string (length 261-37.7k) |
| repo | string (length 7-53) |
| base_commit | string (length 40) |
| hints_text | string (301 distinct values) |
| test_patch | string (length 212-2.22M) |
| problem_statement | string (length 23-37.7k) |
| version | string (1 distinct value) |
| environment_setup_commit | string (length 40) |
| FAIL_TO_PASS | list (length 1-4.94k) |
| PASS_TO_PASS | list (length 0-7.82k) |
| meta | dict |
| created_at | string (length 25) |
| license | string (8 distinct values) |
| __index_level_0__ | int64 (0-6.41k) |
lovasoa__marshmallow_dataclass-110
|
diff --git a/marshmallow_dataclass/__init__.py b/marshmallow_dataclass/__init__.py
index 05f39b9..9f4b770 100644
--- a/marshmallow_dataclass/__init__.py
+++ b/marshmallow_dataclass/__init__.py
@@ -451,6 +451,17 @@ def field_for_schema(
metadata.setdefault("allow_none", True)
return marshmallow.fields.Raw(**metadata)
+ if typing_inspect.is_literal_type(typ):
+ arguments = typing_inspect.get_args(typ)
+ return marshmallow.fields.Raw(
+ validate=(
+ marshmallow.validate.Equal(arguments[0])
+ if len(arguments) == 1
+ else marshmallow.validate.OneOf(arguments)
+ ),
+ **metadata,
+ )
+
# Generic types
origin = typing_inspect.get_origin(typ)
if origin:
diff --git a/setup.py b/setup.py
index 4f7f771..fc65498 100644
--- a/setup.py
+++ b/setup.py
@@ -23,6 +23,10 @@ EXTRAS_REQUIRE = {
# re: pypy: typed-ast (a dependency of mypy) fails to install on pypy
# https://github.com/python/typed_ast/issues/111
"pytest-mypy-plugins>=1.2.0; implementation_name != 'pypy'",
+ # `Literal` was introduced in:
+ # - Python 3.8 (https://www.python.org/dev/peps/pep-0586)
+ # - typing-extensions 3.7.2 (https://github.com/python/typing/pull/591)
+ "typing-extensions~=3.7.2; python_version < '3.8'",
],
}
EXTRAS_REQUIRE["dev"] = (
|
lovasoa/marshmallow_dataclass
|
7c20fb97b982cb96538c484bdfbd16ff47815476
|
diff --git a/tests/test_class_schema.py b/tests/test_class_schema.py
index b6c9837..5aea442 100644
--- a/tests/test_class_schema.py
+++ b/tests/test_class_schema.py
@@ -3,6 +3,11 @@ import unittest
from typing import Any
from uuid import UUID
+try:
+ from typing import Literal
+except ImportError:
+ from typing_extensions import Literal # type: ignore
+
import dataclasses
from marshmallow import Schema, ValidationError
from marshmallow.fields import Field, UUID as UUIDField, List as ListField, Integer
@@ -132,6 +137,31 @@ class TestClassSchema(unittest.TestCase):
schema = class_schema(A)()
self.assertRaises(ValidationError, lambda: schema.load({"data": None}))
+ def test_literal(self):
+ @dataclasses.dataclass
+ class A:
+ data: Literal["a"]
+
+ schema = class_schema(A)()
+ self.assertEqual(A(data="a"), schema.load({"data": "a"}))
+ self.assertEqual(schema.dump(A(data="a")), {"data": "a"})
+ for data in ["b", 2, 2.34, False]:
+ with self.assertRaises(ValidationError):
+ schema.load({"data": data})
+
+ def test_literal_multiple_types(self):
+ @dataclasses.dataclass
+ class A:
+ data: Literal["a", 1, 1.23, True]
+
+ schema = class_schema(A)()
+ for data in ["a", 1, 1.23, True]:
+ self.assertEqual(A(data=data), schema.load({"data": data}))
+ self.assertEqual(schema.dump(A(data=data)), {"data": data})
+ for data in ["b", 2, 2.34, False]:
+ with self.assertRaises(ValidationError):
+ schema.load({"data": data})
+
def test_validator_stacking(self):
# See: https://github.com/lovasoa/marshmallow_dataclass/issues/91
class SimpleValidator(Validator):
diff --git a/tests/test_field_for_schema.py b/tests/test_field_for_schema.py
index 96df872..4a388cc 100644
--- a/tests/test_field_for_schema.py
+++ b/tests/test_field_for_schema.py
@@ -4,7 +4,12 @@ import unittest
from enum import Enum
from typing import Dict, Optional, Union, Any, List, Tuple
-from marshmallow import fields, Schema
+try:
+ from typing import Literal
+except ImportError:
+ from typing_extensions import Literal # type: ignore
+
+from marshmallow import fields, Schema, validate
from marshmallow_dataclass import field_for_schema, dataclass, union_field
@@ -88,6 +93,18 @@ class TestFieldForSchema(unittest.TestCase):
marshmallow_enum.EnumField(enum=Color, required=True),
)
+ def test_literal(self):
+ self.assertFieldsEqual(
+ field_for_schema(Literal["a"]),
+ fields.Raw(required=True, validate=validate.Equal("a")),
+ )
+
+ def test_literal_multiple_types(self):
+ self.assertFieldsEqual(
+ field_for_schema(Literal["a", 1, 1.23, True]),
+ fields.Raw(required=True, validate=validate.OneOf(("a", 1, 1.23, True))),
+ )
+
def test_union(self):
self.assertFieldsEqual(
field_for_schema(Union[int, str]),
|
Literal field and tagged union
The following simple example, which uses a [`Literal`](https://www.python.org/dev/peps/pep-0586/), doesn't work (because `marshmallow-dataclass` doesn't seem to support `Literal`):
```python
from dataclasses import dataclass
from marshmallow_dataclass import class_schema
from typing_extensions import Literal
@dataclass
class A:
choices: Literal['x', 'y']
Schema = class_schema(A) # ERROR
```
This is the error:
```
.../site-packages/marshmallow_dataclass/__init__.py:316: UserWarning: marshmallow_dataclass was called on the class typing_extensions.Literal['x', 'y'], which is not a dataclass. It is going to try and convert the class into a dataclass, which may have undesirable side effects. To avoid this message, make sure all your classes and all the classes of their fields are either explicitly supported by marshmallow_datcalass, or are already dataclasses. For more information, see https://github.com/lovasoa/marshmallow_dataclass/issues/51
f"marshmallow_dataclass was called on the class {clazz}, which is not a dataclass. "
Traceback (most recent call last):
File ".../site-packages/dataclasses.py", line 970, in fields
fields = getattr(class_or_instance, _FIELDS)
AttributeError: '_Literal' object has no attribute '__dataclass_fields__'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File ".../site-packages/marshmallow_dataclass/__init__.py", line 312, in _internal_class_schema
fields: Tuple[dataclasses.Field, ...] = dataclasses.fields(clazz)
File ".../site-packages/dataclasses.py", line 972, in fields
raise TypeError('must be called with a dataclass type or instance')
TypeError: must be called with a dataclass type or instance
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File ".../site-packages/marshmallow_dataclass/__init__.py", line 323, in _internal_class_schema
created_dataclass: type = dataclasses.dataclass(clazz)
File ".../site-packages/dataclasses.py", line 958, in dataclass
return wrap(_cls)
File ".../site-packages/dataclasses.py", line 950, in wrap
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
File ".../site-packages/dataclasses.py", line 764, in _process_class
unsafe_hash, frozen))
AttributeError: '_Literal' object has no attribute '__dataclass_params__'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../site-packages/marshmallow_dataclass/__init__.py", line 303, in class_schema
return _internal_class_schema(clazz, base_schema)
File ".../site-packages/marshmallow_dataclass/__init__.py", line 344, in _internal_class_schema
for field in fields
File ".../site-packages/marshmallow_dataclass/__init__.py", line 345, in <genexpr>
if field.init
File ".../site-packages/marshmallow_dataclass/__init__.py", line 526, in field_for_schema
nested_schema or forward_reference or _internal_class_schema(typ, base_schema)
File ".../site-packages/marshmallow_dataclass/__init__.py", line 327, in _internal_class_schema
f"{getattr(clazz, '__name__', repr(clazz))} is not a dataclass and cannot be turned into one."
TypeError: typing_extensions.Literal['x', 'y'] is not a dataclass and cannot be turned into one.
```
I think `Literal` is quite useful, e.g. for creating tagged unions. Is there any chance to add support for `Literal` to `marshmallow-dataclass`?
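For reference, the patch above addresses exactly this by mapping a `Literal` type onto a permissive `Raw` field constrained by a validator. A minimal standalone sketch of the same idea (`literal_field` is an illustrative helper name, not a library API):
```python
import typing_inspect
from marshmallow import fields, validate

try:
    from typing import Literal  # Python >= 3.8
except ImportError:
    from typing_extensions import Literal


def literal_field(typ, **metadata) -> fields.Field:
    # A Literal carries its allowed values as type arguments,
    # e.g. ("x", "y") for Literal["x", "y"].
    arguments = typing_inspect.get_args(typ)
    # Raw accepts any payload type; the validator then restricts
    # the value to the declared literals.
    return fields.Raw(
        validate=(
            validate.Equal(arguments[0])
            if len(arguments) == 1
            else validate.OneOf(arguments)
        ),
        **metadata,
    )


choices = literal_field(Literal["x", "y"], required=True)
```
Using `Raw` plus a validator (rather than, say, `String`) keeps mixed-type literals such as `Literal["a", 1, 1.23, True]` working, which the tests above exercise.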
|
0.0
|
7c20fb97b982cb96538c484bdfbd16ff47815476
|
[
"tests/test_class_schema.py::TestClassSchema::test_literal",
"tests/test_class_schema.py::TestClassSchema::test_literal_multiple_types",
"tests/test_field_for_schema.py::TestFieldForSchema::test_literal",
"tests/test_field_for_schema.py::TestFieldForSchema::test_literal_multiple_types"
] |
[
"tests/test_class_schema.py::TestClassSchema::test_any_none",
"tests/test_class_schema.py::TestClassSchema::test_any_none_disallowed",
"tests/test_class_schema.py::TestClassSchema::test_filtering_list_schema",
"tests/test_class_schema.py::TestClassSchema::test_simple_unique_schemas",
"tests/test_class_schema.py::TestClassSchema::test_use_type_mapping_from_base_schema",
"tests/test_class_schema.py::TestClassSchema::test_validator_stacking",
"tests/test_field_for_schema.py::TestFieldForSchema::test_any",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_dict",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_list",
"tests/test_field_for_schema.py::TestFieldForSchema::test_dict_from_typing",
"tests/test_field_for_schema.py::TestFieldForSchema::test_enum",
"tests/test_field_for_schema.py::TestFieldForSchema::test_explicit_field",
"tests/test_field_for_schema.py::TestFieldForSchema::test_int",
"tests/test_field_for_schema.py::TestFieldForSchema::test_marshmallow_dataclass",
"tests/test_field_for_schema.py::TestFieldForSchema::test_newtype",
"tests/test_field_for_schema.py::TestFieldForSchema::test_optional_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_override_container_type_with_type_mapping",
"tests/test_field_for_schema.py::TestFieldForSchema::test_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_union",
"tests/test_field_for_schema.py::TestFieldForSchema::test_union_multiple_types_with_none"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-26 16:46:48+00:00
|
mit
| 3,635 |
|
lovasoa__marshmallow_dataclass-121
|
diff --git a/marshmallow_dataclass/__init__.py b/marshmallow_dataclass/__init__.py
index 6367163..7da3ac2 100644
--- a/marshmallow_dataclass/__init__.py
+++ b/marshmallow_dataclass/__init__.py
@@ -297,6 +297,19 @@ def class_schema(
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'name': ['Name too long']}
+
+ You can use the ``metadata`` argument to override default field behaviour, e.g. the fact that
+ ``Optional`` fields allow ``None`` values:
+
+ >>> @dataclasses.dataclass
+ ... class Custom:
+ ... name: Optional[str] = dataclasses.field(metadata={"allow_none": False})
+ >>> class_schema(Custom)().load({"name": None})
+ Traceback (most recent call last):
+ ...
+ marshmallow.exceptions.ValidationError: {'name': ['Field may not be null.']}
+ >>> class_schema(Custom)().load({})
+ Custom(name=None)
"""
if not dataclasses.is_dataclass(clazz):
clazz = dataclasses.dataclass(clazz)
@@ -496,6 +509,7 @@ def field_for_schema(
)
elif typing_inspect.is_union_type(typ):
if typing_inspect.is_optional_type(typ):
+ metadata["allow_none"] = metadata.get("allow_none", True)
metadata["default"] = metadata.get("default", None)
metadata["missing"] = metadata.get("missing", None)
metadata["required"] = False
|
lovasoa/marshmallow_dataclass
|
cd0d2cf8c0ad38509259abf3ac8fd5555301cec8
|
diff --git a/tests/test_optional.py b/tests/test_optional.py
new file mode 100644
index 0000000..fbe9edc
--- /dev/null
+++ b/tests/test_optional.py
@@ -0,0 +1,39 @@
+import unittest
+from dataclasses import field
+from typing import Optional
+
+import marshmallow
+
+from marshmallow_dataclass import dataclass
+
+
+class TestOptionalField(unittest.TestCase):
+ def test_optional_field(self):
+ @dataclass
+ class OptionalValue:
+ value: Optional[str] = "value"
+
+ schema = OptionalValue.Schema()
+
+ self.assertEqual(schema.load({"value": None}), OptionalValue(value=None))
+ self.assertEqual(schema.load({"value": "hello"}), OptionalValue(value="hello"))
+ self.assertEqual(schema.load({}), OptionalValue())
+
+ def test_optional_field_not_none(self):
+ @dataclass
+ class OptionalValueNotNone:
+ value: Optional[str] = field(
+ default="value", metadata={"allow_none": False}
+ )
+
+ schema = OptionalValueNotNone.Schema()
+
+ self.assertEqual(schema.load({}), OptionalValueNotNone())
+ self.assertEqual(
+ schema.load({"value": "hello"}), OptionalValueNotNone(value="hello")
+ )
+ with self.assertRaises(marshmallow.exceptions.ValidationError) as exc_cm:
+ schema.load({"value": None})
+ self.assertEqual(
+ exc_cm.exception.messages, {"value": ["Field may not be null."]}
+ )
|
Serialisation error on an Optional field whose default value is not None
```python
from dataclasses import dataclass, field
from typing import List, Optional
import marshmallow_dataclass
import marshmallow.validate
@dataclass
class Building:
# field metadata is used to instantiate the marshmallow field
height: float = field(metadata={"validate": marshmallow.validate.Range(min=0)})
name: str = field(default="anonymous")
age: Optional[int] = field(default=0)
building_schema = marshmallow_dataclass.class_schema(Building)()
building_schema.load({"name": "Eiffel Tower", "height": 324, "age": None})
```
For the above code I am getting this error:
```bash
building_schema({"name": "Eiffel Tower", "height": 324, "age": None})
File "/XXX/.venv/lib/python3.7/site-packages/marshmallow_dataclass/__init__.py", line 544, in load
all_loaded = super().load(data, many=many, **kwargs)
File "/XXX/.venv/lib/python3.7/site-packages/marshmallow/schema.py", line 724, in load
data, many=many, partial=partial, unknown=unknown, postprocess=True
File "/XXX/.venv/lib/python3.7/site-packages/marshmallow/schema.py", line 911, in _do_load
raise exc
marshmallow.exceptions.ValidationError: {'age': ['Field may not be null.']}
```
The error comes from the marshmallow schema, but I am not able to find the root cause. Any help would be appreciated.
When `age: Optional[int] = field(default=None)` is used, the above code works.
Python 3.7
marshmallow_dataclass tried on 8.0.0 and 8.1.0
marshmallow 3.8.0
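The patch above fixes this by setting `allow_none=True` explicitly for `Optional` fields. My reading of the root cause: marshmallow only defaults `allow_none` to `True` when `missing` is `None`, and a non-`None` dataclass default (here `0`) was propagated into `missing`, leaving `allow_none=False`. A minimal sketch of the behaviour once the fix is applied:
```python
from dataclasses import field
from typing import Optional

import marshmallow_dataclass


@marshmallow_dataclass.dataclass
class Building:
    name: str = "anonymous"
    # Optional with a non-None default: previously rejected {"age": None}.
    age: Optional[int] = 0


print(Building.Schema().load({"name": "Eiffel Tower", "age": None}))
# => Building(name='Eiffel Tower', age=None)


# The new default can still be overridden through field metadata:
@marshmallow_dataclass.dataclass
class StrictBuilding:
    age: Optional[int] = field(default=0, metadata={"allow_none": False})


StrictBuilding.Schema().load({"age": None})  # raises ValidationError again
```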
|
0.0
|
cd0d2cf8c0ad38509259abf3ac8fd5555301cec8
|
[
"tests/test_optional.py::TestOptionalField::test_optional_field"
] |
[
"tests/test_optional.py::TestOptionalField::test_optional_field_not_none"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-08 16:32:55+00:00
|
mit
| 3,636 |
|
lovasoa__marshmallow_dataclass-150
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f47ee64..53fa59e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,7 @@
# marshmallow\_dataclass change log
+- Add support for the Final type. See [#150](https://github.com/lovasoa/marshmallow_dataclass/pull/150)
+
## v8.4.1
- Fix compatibility with older python versions.
diff --git a/marshmallow_dataclass/__init__.py b/marshmallow_dataclass/__init__.py
index 081d476..f1cd81d 100644
--- a/marshmallow_dataclass/__init__.py
+++ b/marshmallow_dataclass/__init__.py
@@ -579,6 +579,16 @@ def field_for_schema(
**metadata,
)
+ if typing_inspect.is_final_type(typ):
+ arguments = typing_inspect.get_args(typ)
+ if arguments:
+ subtyp = arguments[0]
+ elif default is not marshmallow.missing:
+ subtyp = type(default)
+ else:
+ subtyp = Any
+ return field_for_schema(subtyp, default, metadata, base_schema)
+
# Generic types
generic_field = _field_for_generic_type(typ, base_schema, **metadata)
if generic_field:
diff --git a/setup.py b/setup.py
index ee17c86..d94d96a 100644
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@ setup(
classifiers=CLASSIFIERS,
license="MIT",
python_requires=">=3.6",
- install_requires=["marshmallow>=3.0.0,<4.0", "typing-inspect"],
+ install_requires=["marshmallow>=3.0.0,<4.0", "typing-inspect>=0.7.0"],
extras_require=EXTRAS_REQUIRE,
package_data={"marshmallow_dataclass": ["py.typed"]},
)
|
lovasoa/marshmallow_dataclass
|
f34066f9785c55e6a39451ae86cedeb59e51da30
|
diff --git a/tests/test_class_schema.py b/tests/test_class_schema.py
index 18dc762..02f3ba3 100644
--- a/tests/test_class_schema.py
+++ b/tests/test_class_schema.py
@@ -1,12 +1,12 @@
import typing
import unittest
-from typing import Any
+from typing import Any, TYPE_CHECKING
from uuid import UUID
try:
- from typing import Literal # type: ignore[attr-defined]
+ from typing import Final, Literal # type: ignore[attr-defined]
except ImportError:
- from typing_extensions import Literal # type: ignore[misc]
+ from typing_extensions import Final, Literal # type: ignore[misc]
import dataclasses
from marshmallow import Schema, ValidationError
@@ -162,6 +162,60 @@ class TestClassSchema(unittest.TestCase):
with self.assertRaises(ValidationError):
schema.load({"data": data})
+ def test_final(self):
+ @dataclasses.dataclass
+ class A:
+ # Mypy currently considers read-only dataclass attributes without a
+ # default value an error.
+ # See: https://github.com/python/mypy/issues/10688.
+ data: Final[str] # type: ignore[misc]
+
+ schema = class_schema(A)()
+ self.assertEqual(A(data="a"), schema.load({"data": "a"}))
+ self.assertEqual(schema.dump(A(data="a")), {"data": "a"})
+ for data in [2, 2.34, False]:
+ with self.assertRaises(ValidationError):
+ schema.load({"data": data})
+
+ def test_final_infers_type_from_default(self):
+ # @dataclasses.dataclass
+ class A:
+ data: Final = "a"
+
+ # @dataclasses.dataclass
+ class B:
+ data: Final = A()
+
+ # NOTE: This workaround is needed to avoid a Mypy crash.
+ # See: https://github.com/python/mypy/issues/10090#issuecomment-865971891
+ if not TYPE_CHECKING:
+ A = dataclasses.dataclass(A)
+ B = dataclasses.dataclass(B)
+
+ schema_a = class_schema(A)()
+ self.assertEqual(A(data="a"), schema_a.load({}))
+ self.assertEqual(A(data="a"), schema_a.load({"data": "a"}))
+ self.assertEqual(A(data="b"), schema_a.load({"data": "b"}))
+ self.assertEqual(schema_a.dump(A()), {"data": "a"})
+ self.assertEqual(schema_a.dump(A(data="a")), {"data": "a"})
+ self.assertEqual(schema_a.dump(A(data="b")), {"data": "b"})
+ for data in [2, 2.34, False]:
+ with self.assertRaises(ValidationError):
+ schema_a.load({"data": data})
+
+ schema_b = class_schema(B)()
+ self.assertEqual(B(data=A()), schema_b.load({}))
+ self.assertEqual(B(data=A()), schema_b.load({"data": {}}))
+ self.assertEqual(B(data=A()), schema_b.load({"data": {"data": "a"}}))
+ self.assertEqual(B(data=A(data="b")), schema_b.load({"data": {"data": "b"}}))
+ self.assertEqual(schema_b.dump(B()), {"data": {"data": "a"}})
+ self.assertEqual(schema_b.dump(B(data=A())), {"data": {"data": "a"}})
+ self.assertEqual(schema_b.dump(B(data=A(data="a"))), {"data": {"data": "a"}})
+ self.assertEqual(schema_b.dump(B(data=A(data="b"))), {"data": {"data": "b"}})
+ for data in [2, 2.34, False]:
+ with self.assertRaises(ValidationError):
+ schema_b.load({"data": data})
+
def test_validator_stacking(self):
# See: https://github.com/lovasoa/marshmallow_dataclass/issues/91
class SimpleValidator(Validator):
diff --git a/tests/test_field_for_schema.py b/tests/test_field_for_schema.py
index 50ef60b..c43c18f 100644
--- a/tests/test_field_for_schema.py
+++ b/tests/test_field_for_schema.py
@@ -5,9 +5,9 @@ from enum import Enum
from typing import Dict, Optional, Union, Any, List, Tuple
try:
- from typing import Literal # type: ignore[attr-defined]
+ from typing import Final, Literal # type: ignore[attr-defined]
except ImportError:
- from typing_extensions import Literal # type: ignore[misc]
+ from typing_extensions import Final, Literal # type: ignore[misc]
from marshmallow import fields, Schema, validate
@@ -110,6 +110,16 @@ class TestFieldForSchema(unittest.TestCase):
fields.Raw(required=True, validate=validate.OneOf(("a", 1, 1.23, True))),
)
+ def test_final(self):
+ self.assertFieldsEqual(
+ field_for_schema(Final[str]), fields.String(required=True)
+ )
+
+ def test_final_without_type(self):
+ self.assertFieldsEqual(
+ field_for_schema(Final), fields.Raw(required=True, allow_none=True)
+ )
+
def test_union(self):
self.assertFieldsEqual(
field_for_schema(Union[int, str]),
|
Final type
The following simple example, which uses the [`Final`](https://docs.python.org/3/library/typing.html#typing.Final) type hint, doesn't work (because `marshmallow-dataclass` doesn't seem to support `Final`):
```python
from dataclasses import dataclass
from marshmallow_dataclass import class_schema
from typing_extensions import Final
@dataclass
class A:
value: Final[int]
Schema = class_schema(A) # ERROR
```
This is the error:
```
.../site-packages/marshmallow_dataclass/__init__.py:316: UserWarning: marshmallow_dataclass was called on the class typing_extensions.Final[int], which is not a dataclass. It is going to try and convert the class into a dataclass, which may have undesirable side effects. To avoid this message, make sure all your classes and all the classes of their fields are either explicitly supported by marshmallow_datcalass, or are already dataclasses. For more information, see https://github.com/lovasoa/marshmallow_dataclass/issues/51
f"marshmallow_dataclass was called on the class {clazz}, which is not a dataclass. "
Traceback (most recent call last):
File ".../site-packages/dataclasses.py", line 970, in fields
fields = getattr(class_or_instance, _FIELDS)
AttributeError: '_Final' object has no attribute '__dataclass_fields__'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File ".../site-packages/marshmallow_dataclass/__init__.py", line 312, in _internal_class_schema
fields: Tuple[dataclasses.Field, ...] = dataclasses.fields(clazz)
File ".../site-packages/dataclasses.py", line 972, in fields
raise TypeError('must be called with a dataclass type or instance')
TypeError: must be called with a dataclass type or instance
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File ".../site-packages/marshmallow_dataclass/__init__.py", line 323, in _internal_class_schema
created_dataclass: type = dataclasses.dataclass(clazz)
File ".../site-packages/dataclasses.py", line 958, in dataclass
return wrap(_cls)
File ".../site-packages/dataclasses.py", line 950, in wrap
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
File ".../site-packages/dataclasses.py", line 764, in _process_class
unsafe_hash, frozen))
AttributeError: '_Final' object has no attribute '__dataclass_params__'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../site-packages/marshmallow_dataclass/__init__.py", line 303, in class_schema
return _internal_class_schema(clazz, base_schema)
File ".../site-packages/marshmallow_dataclass/__init__.py", line 344, in _internal_class_schema
for field in fields
File ".../site-packages/marshmallow_dataclass/__init__.py", line 345, in <genexpr>
if field.init
File ".../site-packages/marshmallow_dataclass/__init__.py", line 526, in field_for_schema
nested_schema or forward_reference or _internal_class_schema(typ, base_schema)
File ".../site-packages/marshmallow_dataclass/__init__.py", line 327, in _internal_class_schema
f"{getattr(clazz, '__name__', repr(clazz))} is not a dataclass and cannot be turned into one."
TypeError: typing_extensions.Final[int] is not a dataclass and cannot be turned into one.
```
I find the `Final` type quite useful for annotating a field to be read-only without runtime overhead.
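For reference, the patch above supports this by treating `Final[T]` as a plain `T` for schema generation, falling back to the type of the default value for a bare `Final` (and to `Any` when there is no default). A standalone sketch of that unwrapping step (`unwrap_final` is an illustrative name, not a library function):
```python
from typing import Any

import marshmallow
import typing_inspect


def unwrap_final(typ, default=marshmallow.missing):
    """Resolve the type a Final annotation should be (de)serialized as."""
    arguments = typing_inspect.get_args(typ)
    if arguments:
        # Final[int] -> int
        return arguments[0]
    if default is not marshmallow.missing:
        # data: Final = "a" -> str
        return type(default)
    # Bare Final with no default: nothing to infer, accept anything.
    return Any
```
The patched `field_for_schema` then recurses with the unwrapped type, which is why `field_for_schema(Final[str])` yields a plain `fields.String(required=True)` in the test above.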
|
0.0
|
f34066f9785c55e6a39451ae86cedeb59e51da30
|
[
"tests/test_class_schema.py::TestClassSchema::test_final",
"tests/test_class_schema.py::TestClassSchema::test_final_infers_type_from_default",
"tests/test_field_for_schema.py::TestFieldForSchema::test_final",
"tests/test_field_for_schema.py::TestFieldForSchema::test_final_without_type"
] |
[
"tests/test_class_schema.py::TestClassSchema::test_any_none",
"tests/test_class_schema.py::TestClassSchema::test_any_none_disallowed",
"tests/test_class_schema.py::TestClassSchema::test_filtering_list_schema",
"tests/test_class_schema.py::TestClassSchema::test_literal",
"tests/test_class_schema.py::TestClassSchema::test_literal_multiple_types",
"tests/test_class_schema.py::TestClassSchema::test_simple_unique_schemas",
"tests/test_class_schema.py::TestClassSchema::test_use_type_mapping_from_base_schema",
"tests/test_class_schema.py::TestClassSchema::test_validator_stacking",
"tests/test_field_for_schema.py::TestFieldForSchema::test_any",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_dict",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_list",
"tests/test_field_for_schema.py::TestFieldForSchema::test_dict_from_typing",
"tests/test_field_for_schema.py::TestFieldForSchema::test_enum",
"tests/test_field_for_schema.py::TestFieldForSchema::test_explicit_field",
"tests/test_field_for_schema.py::TestFieldForSchema::test_frozenset",
"tests/test_field_for_schema.py::TestFieldForSchema::test_int",
"tests/test_field_for_schema.py::TestFieldForSchema::test_literal",
"tests/test_field_for_schema.py::TestFieldForSchema::test_literal_multiple_types",
"tests/test_field_for_schema.py::TestFieldForSchema::test_mapping",
"tests/test_field_for_schema.py::TestFieldForSchema::test_marshmallow_dataclass",
"tests/test_field_for_schema.py::TestFieldForSchema::test_newtype",
"tests/test_field_for_schema.py::TestFieldForSchema::test_optional_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_override_container_type_with_type_mapping",
"tests/test_field_for_schema.py::TestFieldForSchema::test_sequence",
"tests/test_field_for_schema.py::TestFieldForSchema::test_set",
"tests/test_field_for_schema.py::TestFieldForSchema::test_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_union",
"tests/test_field_for_schema.py::TestFieldForSchema::test_union_multiple_types_with_none"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-22 13:26:55+00:00
|
mit
| 3,637 |
|
lovasoa__marshmallow_dataclass-165
|
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 7891d61..878b2bc 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python_version: [3.6, 3.7, 3.8, 3.9, pypy3]
+ python_version: ["3.6", "3.7", "3.8", "3.9", "3.10", "pypy3"]
steps:
- uses: actions/checkout@v2
@@ -31,7 +31,7 @@ jobs:
python -m pip install --upgrade pip
pip install --pre -e '.[dev]'
- name: Pre-commit hooks
- if: ${{ matrix.python_version != 'pypy3' }}
+ if: ${{ matrix.python_version != 'pypy3' && matrix.python_version != '3.6' }}
run: pre-commit run --all-files
- name: Test with pytest
run: pytest
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4a79dc2..dde3d4d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,27 +1,27 @@
repos:
- repo: https://github.com/asottile/pyupgrade
- rev: v2.7.4
+ rev: v2.31.0 # Later versions do not support python 3.6
hooks:
- id: pyupgrade
args: ["--py36-plus"]
- repo: https://github.com/python/black
- rev: 20.8b1
+ rev: 22.3.0
hooks:
- id: black
language_version: python3
- repo: https://gitlab.com/pycqa/flake8
- rev: 3.8.4
+ rev: 3.9.2
hooks:
- id: flake8
additional_dependencies: ['flake8-bugbear==19.8.0']
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v0.790
+ rev: v0.931 # Later versions do not support python 3.6
hooks:
- id: mypy
additional_dependencies: [marshmallow-enum,typeguard,marshmallow]
args: [--show-error-codes]
- repo: https://github.com/asottile/blacken-docs
- rev: v1.9.1
+ rev: v1.12.0 # Later versions do not support python 3.6
hooks:
- id: blacken-docs
additional_dependencies: [black==19.3b0]
diff --git a/README.md b/README.md
index 47788ef..2079155 100644
--- a/README.md
+++ b/README.md
@@ -92,7 +92,32 @@ PersonSchema = marshmallow_dataclass.class_schema(Person)
The type of your fields must be either basic
[types supported by marshmallow](https://marshmallow.readthedocs.io/en/stable/api_reference.html#marshmallow.Schema.TYPE_MAPPING)
-(such as `float`, `str`, `bytes`, `datetime`, ...), or other dataclasses.
+(such as `float`, `str`, `bytes`, `datetime`, ...), `Union`, or other dataclasses.
+
+### Union (de)serialization coercion
+
+Typically the `Union` type `Union[X, Y]` means, from a set theory perspective, either `X` or `Y`, i.e. an unordered set. However, the order of the sub-types defines the precedence when attempting to either deserialize or serialize the value, per [here](https://github.com/lovasoa/marshmallow_dataclass/blob/master/marshmallow_dataclass/union_field.py).
+
+For example,
+
+```python
+from typing import Union
+
+from dataclasses import dataclass
+
+
+@dataclass
+class Person:
+ name: str
+ age: Union[int, float]
+
+
+PersonSchema = marshmallow_dataclass.class_schema(Person)
+PersonSchema().load({"name": "jane", "age": 50.0})
+# => Person(name="jane", age=50)
+```
+
+will first (successfully) try to coerce `50.0` to an `int`. If coercion is not desired, the `Any` type can be used, with the caveat that values will not be type checked without additional [validation](https://marshmallow.readthedocs.io/en/stable/marshmallow.validate.html).
### Customizing generated fields
diff --git a/marshmallow_dataclass/__init__.py b/marshmallow_dataclass/__init__.py
index 0459b95..30c2947 100644
--- a/marshmallow_dataclass/__init__.py
+++ b/marshmallow_dataclass/__init__.py
@@ -37,9 +37,10 @@ Full example::
import collections.abc
import dataclasses
import inspect
+import types
import warnings
from enum import EnumMeta
-from functools import lru_cache
+from functools import lru_cache, partial
from typing import (
Any,
Callable,
@@ -53,6 +54,7 @@ from typing import (
TypeVar,
Union,
cast,
+ get_type_hints,
overload,
Sequence,
FrozenSet,
@@ -61,6 +63,9 @@ from typing import (
import marshmallow
import typing_inspect
+from marshmallow_dataclass.lazy_class_attribute import lazy_class_attribute
+
+
__all__ = ["dataclass", "add_schema", "class_schema", "field_for_schema", "NewType"]
NoneType = type(None)
@@ -83,6 +88,7 @@ def dataclass(
unsafe_hash: bool = False,
frozen: bool = False,
base_schema: Optional[Type[marshmallow.Schema]] = None,
+ cls_frame: Optional[types.FrameType] = None,
) -> Type[_U]:
...
@@ -96,6 +102,7 @@ def dataclass(
unsafe_hash: bool = False,
frozen: bool = False,
base_schema: Optional[Type[marshmallow.Schema]] = None,
+ cls_frame: Optional[types.FrameType] = None,
) -> Callable[[Type[_U]], Type[_U]]:
...
@@ -112,12 +119,15 @@ def dataclass(
unsafe_hash: bool = False,
frozen: bool = False,
base_schema: Optional[Type[marshmallow.Schema]] = None,
+ cls_frame: Optional[types.FrameType] = None,
) -> Union[Type[_U], Callable[[Type[_U]], Type[_U]]]:
"""
This decorator does the same as dataclasses.dataclass, but also applies :func:`add_schema`.
It adds a `.Schema` attribute to the class object
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
+    :param cls_frame: frame of cls definition, used to obtain locals with the definitions of other classes.
+ If None is passed the caller frame will be treated as cls_frame
>>> @dataclass
... class Artist:
@@ -140,9 +150,10 @@ def dataclass(
dc = dataclasses.dataclass( # type: ignore
_cls, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
+ cls_frame = cls_frame or inspect.stack()[1][0]
if _cls is None:
- return lambda cls: add_schema(dc(cls), base_schema)
- return add_schema(dc, base_schema)
+ return lambda cls: add_schema(dc(cls), base_schema, cls_frame=cls_frame)
+ return add_schema(dc, base_schema, cls_frame=cls_frame)
@overload
@@ -159,18 +170,21 @@ def add_schema(
@overload
def add_schema(
- _cls: Type[_U], base_schema: Type[marshmallow.Schema] = None
+ _cls: Type[_U],
+ base_schema: Type[marshmallow.Schema] = None,
+ cls_frame: types.FrameType = None,
) -> Type[_U]:
...
-def add_schema(_cls=None, base_schema=None):
+def add_schema(_cls=None, base_schema=None, cls_frame=None):
"""
This decorator adds a marshmallow schema as the 'Schema' attribute in a dataclass.
It uses :func:`class_schema` internally.
:param type _cls: The dataclass to which a Schema should be added
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
+ :param cls_frame: frame of cls definition
>>> class BaseSchema(marshmallow.Schema):
... def on_bind_field(self, field_name, field_obj):
@@ -187,20 +201,27 @@ def add_schema(_cls=None, base_schema=None):
def decorator(clazz: Type[_U]) -> Type[_U]:
# noinspection PyTypeHints
- clazz.Schema = class_schema(clazz, base_schema) # type: ignore
+ clazz.Schema = lazy_class_attribute( # type: ignore
+ partial(class_schema, clazz, base_schema, cls_frame),
+ "Schema",
+ clazz.__name__,
+ )
return clazz
return decorator(_cls) if _cls else decorator
def class_schema(
- clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
+ clazz: type,
+ base_schema: Optional[Type[marshmallow.Schema]] = None,
+ clazz_frame: types.FrameType = None,
) -> Type[marshmallow.Schema]:
"""
Convert a class to a marshmallow schema
:param clazz: A python class (may be a dataclass)
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
+ :param clazz_frame: frame of cls definition
:return: A marshmallow Schema corresponding to the dataclass
.. note::
@@ -315,12 +336,14 @@ def class_schema(
"""
if not dataclasses.is_dataclass(clazz):
clazz = dataclasses.dataclass(clazz)
- return _internal_class_schema(clazz, base_schema)
+ return _internal_class_schema(clazz, base_schema, clazz_frame)
@lru_cache(maxsize=MAX_CLASS_SCHEMA_CACHE_SIZE)
def _internal_class_schema(
- clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
+ clazz: type,
+ base_schema: Optional[Type[marshmallow.Schema]] = None,
+ clazz_frame: types.FrameType = None,
) -> Type[marshmallow.Schema]:
try:
# noinspection PyDataclass
@@ -339,7 +362,7 @@ def _internal_class_schema(
"****** WARNING ******"
)
created_dataclass: type = dataclasses.dataclass(clazz)
- return _internal_class_schema(created_dataclass, base_schema)
+ return _internal_class_schema(created_dataclass, base_schema, clazz_frame)
except Exception:
raise TypeError(
f"{getattr(clazz, '__name__', repr(clazz))} is not a dataclass and cannot be turned into one."
@@ -351,12 +374,20 @@ def _internal_class_schema(
for k, v in inspect.getmembers(clazz)
if hasattr(v, "__marshmallow_hook__") or k in MEMBERS_WHITELIST
}
+
# Update the schema members to contain marshmallow fields instead of dataclass fields
+ type_hints = get_type_hints(
+ clazz, localns=clazz_frame.f_locals if clazz_frame else None
+ )
attributes.update(
(
field.name,
field_for_schema(
- field.type, _get_field_default(field), field.metadata, base_schema
+ type_hints[field.name],
+ _get_field_default(field),
+ field.metadata,
+ base_schema,
+ clazz_frame,
),
)
for field in fields
@@ -381,6 +412,7 @@ def _field_by_supertype(
newtype_supertype: Type,
metadata: dict,
base_schema: Optional[Type[marshmallow.Schema]],
+ typ_frame: Optional[types.FrameType],
) -> marshmallow.fields.Field:
"""
Return a new field for fields based on a super field. (Usually spawned from NewType)
@@ -411,6 +443,7 @@ def _field_by_supertype(
metadata=metadata,
default=default,
base_schema=base_schema,
+ typ_frame=typ_frame,
)
@@ -432,7 +465,10 @@ def _generic_type_add_any(typ: type) -> type:
def _field_for_generic_type(
- typ: type, base_schema: Optional[Type[marshmallow.Schema]], **metadata: Any
+ typ: type,
+ base_schema: Optional[Type[marshmallow.Schema]],
+ typ_frame: Optional[types.FrameType],
+ **metadata: Any,
) -> Optional[marshmallow.fields.Field]:
"""
If the type is a generic interface, resolve the arguments and construct the appropriate Field.
@@ -444,7 +480,9 @@ def _field_for_generic_type(
type_mapping = base_schema.TYPE_MAPPING if base_schema else {}
if origin in (list, List):
- child_type = field_for_schema(arguments[0], base_schema=base_schema)
+ child_type = field_for_schema(
+ arguments[0], base_schema=base_schema, typ_frame=typ_frame
+ )
list_type = cast(
Type[marshmallow.fields.List],
type_mapping.get(List, marshmallow.fields.List),
@@ -453,25 +491,32 @@ def _field_for_generic_type(
if origin in (collections.abc.Sequence, Sequence):
from . import collection_field
- child_type = field_for_schema(arguments[0], base_schema=base_schema)
+ child_type = field_for_schema(
+ arguments[0], base_schema=base_schema, typ_frame=typ_frame
+ )
return collection_field.Sequence(cls_or_instance=child_type, **metadata)
if origin in (set, Set):
from . import collection_field
- child_type = field_for_schema(arguments[0], base_schema=base_schema)
+ child_type = field_for_schema(
+ arguments[0], base_schema=base_schema, typ_frame=typ_frame
+ )
return collection_field.Set(
cls_or_instance=child_type, frozen=False, **metadata
)
if origin in (frozenset, FrozenSet):
from . import collection_field
- child_type = field_for_schema(arguments[0], base_schema=base_schema)
+ child_type = field_for_schema(
+ arguments[0], base_schema=base_schema, typ_frame=typ_frame
+ )
return collection_field.Set(
cls_or_instance=child_type, frozen=True, **metadata
)
if origin in (tuple, Tuple):
children = tuple(
- field_for_schema(arg, base_schema=base_schema) for arg in arguments
+ field_for_schema(arg, base_schema=base_schema, typ_frame=typ_frame)
+ for arg in arguments
)
tuple_type = cast(
Type[marshmallow.fields.Tuple],
@@ -483,8 +528,12 @@ def _field_for_generic_type(
elif origin in (dict, Dict, collections.abc.Mapping, Mapping):
dict_type = type_mapping.get(Dict, marshmallow.fields.Dict)
return dict_type(
- keys=field_for_schema(arguments[0], base_schema=base_schema),
- values=field_for_schema(arguments[1], base_schema=base_schema),
+ keys=field_for_schema(
+ arguments[0], base_schema=base_schema, typ_frame=typ_frame
+ ),
+ values=field_for_schema(
+ arguments[1], base_schema=base_schema, typ_frame=typ_frame
+ ),
**metadata,
)
elif typing_inspect.is_union_type(typ):
@@ -497,7 +546,10 @@ def _field_for_generic_type(
subtypes = [t for t in arguments if t is not NoneType] # type: ignore
if len(subtypes) == 1:
return field_for_schema(
- subtypes[0], metadata=metadata, base_schema=base_schema
+ subtypes[0],
+ metadata=metadata,
+ base_schema=base_schema,
+ typ_frame=typ_frame,
)
from . import union_field
@@ -506,7 +558,10 @@ def _field_for_generic_type(
(
subtyp,
field_for_schema(
- subtyp, metadata={"required": True}, base_schema=base_schema
+ subtyp,
+ metadata={"required": True},
+ base_schema=base_schema,
+ typ_frame=typ_frame,
),
)
for subtyp in subtypes
@@ -521,6 +576,7 @@ def field_for_schema(
default=marshmallow.missing,
metadata: Mapping[str, Any] = None,
base_schema: Optional[Type[marshmallow.Schema]] = None,
+ typ_frame: Optional[types.FrameType] = None,
) -> marshmallow.fields.Field:
"""
Get a marshmallow Field corresponding to the given python type.
@@ -530,6 +586,7 @@ def field_for_schema(
:param default: value to use for (de)serialization when the field is missing
:param metadata: Additional parameters to pass to the marshmallow field constructor
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
+ :param typ_frame: frame of type definition
>>> int_field = field_for_schema(int, default=9, metadata=dict(required=True))
>>> int_field.__class__
@@ -588,22 +645,24 @@ def field_for_schema(
subtyp = type(default)
else:
subtyp = Any
- return field_for_schema(subtyp, default, metadata, base_schema)
+ return field_for_schema(subtyp, default, metadata, base_schema, typ_frame)
# Generic types
- generic_field = _field_for_generic_type(typ, base_schema, **metadata)
+ generic_field = _field_for_generic_type(typ, base_schema, typ_frame, **metadata)
if generic_field:
return generic_field
- # typing.NewType returns a function with a __supertype__ attribute
+ # typing.NewType returns a function (in python <= 3.9) or a class (python >= 3.10) with a
+ # __supertype__ attribute
newtype_supertype = getattr(typ, "__supertype__", None)
- if newtype_supertype and inspect.isfunction(typ):
+ if typing_inspect.is_new_type(typ) and newtype_supertype is not None:
return _field_by_supertype(
typ=typ,
default=default,
newtype_supertype=newtype_supertype,
metadata=metadata,
base_schema=base_schema,
+ typ_frame=typ_frame,
)
# enumerations
@@ -613,12 +672,15 @@ def field_for_schema(
return marshmallow_enum.EnumField(typ, **metadata)
# Nested marshmallow dataclass
+    # this is just the class name rather than the actual schema until the schema is ready
nested_schema = getattr(typ, "Schema", None)
# Nested dataclasses
forward_reference = getattr(typ, "__forward_arg__", None)
nested = (
- nested_schema or forward_reference or _internal_class_schema(typ, base_schema)
+ nested_schema
+ or forward_reference
+ or _internal_class_schema(typ, base_schema, typ_frame)
)
return marshmallow.fields.Nested(nested, **metadata)
diff --git a/marshmallow_dataclass/lazy_class_attribute.py b/marshmallow_dataclass/lazy_class_attribute.py
new file mode 100644
index 0000000..f930cc0
--- /dev/null
+++ b/marshmallow_dataclass/lazy_class_attribute.py
@@ -0,0 +1,42 @@
+from typing import Any, Callable
+
+
+__all__ = ("lazy_class_attribute",)
+
+
+class LazyClassAttribute:
+ """Descriptor decorator implementing a class-level, read-only
+ property, which caches its results on the class(es) on which it
+ operates.
+ """
+
+ __slots__ = ("func", "name", "called", "forward_value")
+
+ def __init__(
+ self, func: Callable[..., Any], name: str = None, forward_value: Any = None
+ ):
+ self.func = func
+ self.name = name
+ self.called = False
+ self.forward_value = forward_value
+
+ def __get__(self, instance, cls=None):
+ if not cls:
+ cls = type(instance)
+
+ # avoid recursion
+ if self.called:
+ return self.forward_value
+
+ self.called = True
+
+ setattr(cls, self.name, self.func())
+
+ # "getattr" is used to handle bounded methods
+ return getattr(cls, self.name)
+
+ def __set_name__(self, owner, name):
+ self.name = self.name or name
+
+
+lazy_class_attribute = LazyClassAttribute
diff --git a/setup.py b/setup.py
index 01f2d78..880f7d0 100644
--- a/setup.py
+++ b/setup.py
@@ -18,8 +18,8 @@ CLASSIFIERS = [
EXTRAS_REQUIRE = {
"enum": ["marshmallow-enum"],
"union": ["typeguard"],
- ':python_version == "3.6"': ["dataclasses", "types-dataclasses"],
- "lint": ["pre-commit~=1.18"],
+ "lint": ["pre-commit~=2.17"],
+ ':python_version == "3.6"': ["dataclasses", "types-dataclasses<0.6.4"],
"docs": ["sphinx"],
"tests": [
"pytest>=5.4",
@@ -29,7 +29,7 @@ EXTRAS_REQUIRE = {
# `Literal` was introduced in:
# - Python 3.8 (https://www.python.org/dev/peps/pep-0586)
# - typing-extensions 3.7.2 (https://github.com/python/typing/pull/591)
- "typing-extensions~=3.7.2; python_version < '3.8'",
+ "typing-extensions>=3.7.2; python_version < '3.8'",
],
}
EXTRAS_REQUIRE["dev"] = (
|
lovasoa/marshmallow_dataclass
|
fa6c28980ccfe45742cdc9430bfb3b737690935f
|
diff --git a/tests/test_class_schema.py b/tests/test_class_schema.py
index 02f3ba3..69f358b 100644
--- a/tests/test_class_schema.py
+++ b/tests/test_class_schema.py
@@ -1,3 +1,4 @@
+import inspect
import typing
import unittest
from typing import Any, TYPE_CHECKING
@@ -38,12 +39,60 @@ class TestClassSchema(unittest.TestCase):
complex_set = {
class_schema(ComplexNested),
class_schema(ComplexNested, base_schema=None),
+ class_schema(ComplexNested, clazz_frame=None),
class_schema(ComplexNested, None),
+ class_schema(ComplexNested, None, None),
}
simple_set = {
class_schema(Simple),
class_schema(Simple, base_schema=None),
+ class_schema(Simple, clazz_frame=None),
class_schema(Simple, None),
+ class_schema(Simple, None, None),
+ }
+ self.assertEqual(len(complex_set), 1)
+ self.assertEqual(len(simple_set), 1)
+
+ def test_nested_schema_with_passed_frame(self):
+ @dataclasses.dataclass
+ class Simple:
+ one: str = dataclasses.field()
+ two: str = dataclasses.field()
+
+ @dataclasses.dataclass
+ class ComplexNested:
+ three: int = dataclasses.field()
+ four: Simple = dataclasses.field()
+
+ frame = inspect.stack()[0][0]
+
+ self.assertIs(
+ class_schema(ComplexNested, clazz_frame=frame),
+ class_schema(ComplexNested, clazz_frame=frame),
+ )
+ self.assertIs(
+ class_schema(Simple, clazz_frame=frame),
+ class_schema(Simple, clazz_frame=frame),
+ )
+ self.assertIs(
+ class_schema(Simple, clazz_frame=frame),
+ class_schema(ComplexNested, clazz_frame=frame)
+ ._declared_fields["four"]
+ .nested,
+ )
+
+ complex_set = {
+ class_schema(ComplexNested, clazz_frame=frame),
+ class_schema(ComplexNested, base_schema=None, clazz_frame=frame),
+ class_schema(ComplexNested, None, clazz_frame=frame),
+ class_schema(ComplexNested, None, frame),
+ }
+ simple_set = {
+ class_schema(Simple, clazz_frame=frame),
+ class_schema(Simple, base_schema=None, clazz_frame=frame),
+ class_schema(Simple, None, clazz_frame=frame),
+ class_schema(Simple, clazz_frame=frame),
+ class_schema(Simple, None, frame),
}
self.assertEqual(len(complex_set), 1)
self.assertEqual(len(simple_set), 1)
diff --git a/tests/test_forward_references.py b/tests/test_forward_references.py
new file mode 100644
index 0000000..fc05b12
--- /dev/null
+++ b/tests/test_forward_references.py
@@ -0,0 +1,135 @@
+import unittest
+from typing import List, Optional
+
+from marshmallow_dataclass import dataclass
+
+
+@dataclass
+class GlobalA:
+ b: "GlobalB"
+
+
+@dataclass
+class GlobalB:
+ pass
+
+
+@dataclass
+class GlobalSelfRecursion:
+ related: "List[GlobalSelfRecursion]"
+
+
+@dataclass
+class GlobalRecursion:
+ related: "List[GlobalRecursion]"
+
+
+@dataclass
+class GlobalCyclicA:
+ b: "Optional[GlobalCyclicB]"
+
+
+@dataclass
+class GlobalCyclicB:
+ a: "Optional[GlobalCyclicA]"
+
+
+class TestForwardReferences(unittest.TestCase):
+ def test_late_evaluated_types(self):
+ @dataclass
+ class MyData:
+ value: int
+
+ self.assertEqual(MyData(1), MyData.Schema().load(dict(value=1)))
+
+ def test_forward_references_for_basic_types(self):
+ @dataclass
+ class Person:
+ name: "str"
+ age: "int"
+
+ self.assertEqual(
+ Person("Jon", 25), Person.Schema().load(dict(name="Jon", age=25))
+ )
+
+ def test_global_forward_references(self):
+ self.assertEqual(GlobalA(GlobalB()), GlobalA.Schema().load(dict(b=dict())))
+
+ def test_global_self_recursive_type(self):
+ self.assertEqual(
+ GlobalSelfRecursion([GlobalSelfRecursion([])]),
+ GlobalSelfRecursion.Schema().load(dict(related=[dict(related=[])])),
+ )
+
+ def test_global_recursive_type(self):
+ self.assertEqual(
+ GlobalRecursion([GlobalRecursion([])]),
+ GlobalRecursion.Schema().load(dict(related=[dict(related=[])])),
+ )
+
+ def test_global_circular_reference(self):
+ self.assertEqual(
+ GlobalCyclicA(GlobalCyclicB(GlobalCyclicA(None))),
+ GlobalCyclicA.Schema().load(dict(b=dict(a=dict(b=None)))),
+ )
+
+ def test_local_self_recursive_type(self):
+ @dataclass
+ class LocalSelfRecursion:
+ related: "List[LocalSelfRecursion]"
+
+ self.assertEqual(
+ LocalSelfRecursion([LocalSelfRecursion([])]),
+ LocalSelfRecursion.Schema().load(dict(related=[dict(related=[])])),
+ )
+
+ def test_local_recursive_type(self):
+ @dataclass
+ class LocalRecursion:
+ related: "List[LocalRecursion]"
+
+ self.assertEqual(
+ LocalRecursion([LocalRecursion([])]),
+ LocalRecursion.Schema().load(dict(related=[dict(related=[])])),
+ )
+
+ def test_local_forward_references(self):
+ @dataclass
+ class LocalA:
+ b: "LocalB"
+
+ @dataclass
+ class LocalB:
+ pass
+
+ self.assertEqual(LocalA(LocalB()), LocalA.Schema().load(dict(b=dict())))
+
+ def test_name_collisions(self):
+ """
+ This is one example about why you should not make local schemas
+ :return:
+ """
+
+ def make_another_a():
+ @dataclass
+ class A:
+ d: int
+
+ A.Schema()
+
+ make_another_a()
+
+ @dataclass
+ class A:
+ c: int
+
+ A.Schema()
+
+ @dataclass
+ class B:
+ a: "A"
+
+ # with self.assertRaises(marshmallow.exceptions.ValidationError):
+ B.Schema().load(dict(a=dict(c=1)))
+ # marshmallow.exceptions.ValidationError:
+ # {'a': {'d': ['Missing data for required field.'], 'c': ['Unknown field.']}}
|
typing-extensions constraint causes CI failures on dependent projects
marshmallow_dataclass 8.5.3 depends on `typing-extensions (>=3.7.2,<3.8.0)`
https://github.com/lovasoa/marshmallow_dataclass/blob/fa6c28980ccfe45742cdc9430bfb3b737690935f/setup.py#L32
but black 21.9b0 depends on `typing-extensions>=3.10.0.0`
https://github.com/psf/black/blob/79575f3376f043186d8b8c4885ef51c6b3c36246/setup.py#L82
Are there any plans to relax constraints on typing-extensions?
Sample run failure:
https://github.com/hacf-fr/renault-api/pull/375/checks?check_run_id=3728756130
|
0.0
|
fa6c28980ccfe45742cdc9430bfb3b737690935f
|
[
"tests/test_class_schema.py::TestClassSchema::test_any_none",
"tests/test_class_schema.py::TestClassSchema::test_any_none_disallowed",
"tests/test_class_schema.py::TestClassSchema::test_filtering_list_schema",
"tests/test_class_schema.py::TestClassSchema::test_final",
"tests/test_class_schema.py::TestClassSchema::test_final_infers_type_from_default",
"tests/test_class_schema.py::TestClassSchema::test_literal",
"tests/test_class_schema.py::TestClassSchema::test_literal_multiple_types",
"tests/test_class_schema.py::TestClassSchema::test_nested_schema_with_passed_frame",
"tests/test_class_schema.py::TestClassSchema::test_simple_unique_schemas",
"tests/test_class_schema.py::TestClassSchema::test_use_type_mapping_from_base_schema",
"tests/test_forward_references.py::TestForwardReferences::test_forward_references_for_basic_types",
"tests/test_forward_references.py::TestForwardReferences::test_global_circular_reference",
"tests/test_forward_references.py::TestForwardReferences::test_global_forward_references",
"tests/test_forward_references.py::TestForwardReferences::test_global_recursive_type",
"tests/test_forward_references.py::TestForwardReferences::test_global_self_recursive_type",
"tests/test_forward_references.py::TestForwardReferences::test_late_evaluated_types",
"tests/test_forward_references.py::TestForwardReferences::test_local_forward_references",
"tests/test_forward_references.py::TestForwardReferences::test_local_recursive_type",
"tests/test_forward_references.py::TestForwardReferences::test_local_self_recursive_type",
"tests/test_forward_references.py::TestForwardReferences::test_name_collisions"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-28 06:56:29+00:00
|
mit
| 3,638 |
|
lovasoa__marshmallow_dataclass-53
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 982f4eb..ffe5fdc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
# marshmallow_dataclass change log
+## v7.1.1
+ - Fix behavior when `base_schema` is passed to a nested dataclass/schema
+ ([#52](https://github.com/lovasoa/marshmallow_dataclass/issues/52)).
+ Thanks [@ADR-007-SoftServe](https://github.com/ADR-007-SoftServe)
+ for the catch and patch.
+
## v7.1.0
- Improved documentation
- The library now has more unit tests
diff --git a/marshmallow_dataclass/__init__.py b/marshmallow_dataclass/__init__.py
index a9b3119..a2dd954 100644
--- a/marshmallow_dataclass/__init__.py
+++ b/marshmallow_dataclass/__init__.py
@@ -117,7 +117,7 @@ def add_schema(_cls: Type[_U]) -> Type[_U]:
@overload
def add_schema(
- base_schema: Type[marshmallow.Schema] = None
+ base_schema: Type[marshmallow.Schema] = None,
) -> Callable[[Type[_U]], Type[_U]]:
...
@@ -422,9 +422,15 @@ def field_for_schema(
return marshmallow_enum.EnumField(typ, **metadata)
+ # Nested marshmallow dataclass
+ nested_schema = getattr(typ, "Schema", None)
+
# Nested dataclasses
forward_reference = getattr(typ, "__forward_arg__", None)
- nested = forward_reference or class_schema(typ, base_schema=base_schema)
+ nested = (
+ nested_schema or forward_reference or class_schema(typ, base_schema=base_schema)
+ )
+
return marshmallow.fields.Nested(nested, **metadata)
|
lovasoa/marshmallow_dataclass
|
2de0d398d63e2f783867bfded6a9276326effda1
|
diff --git a/tests/test_field_for_schema.py b/tests/test_field_for_schema.py
index 4d8b0a0..8f356e9 100644
--- a/tests/test_field_for_schema.py
+++ b/tests/test_field_for_schema.py
@@ -1,11 +1,12 @@
+import inspect
import typing
import unittest
from enum import Enum
from typing import Dict, Optional, Union, Any
-from marshmallow import fields
+from marshmallow import fields, Schema
-from marshmallow_dataclass import field_for_schema
+from marshmallow_dataclass import field_for_schema, dataclass
class TestFieldForSchema(unittest.TestCase):
@@ -13,7 +14,11 @@ class TestFieldForSchema(unittest.TestCase):
self.assertEqual(a.__class__, b.__class__, "field class")
def attrs(x):
- return {k: repr(v) for k, v in x.__dict__.items() if not k.startswith("_")}
+ return {
+ k: f"{v!r} ({v.__mro__!r})" if inspect.isclass(v) else repr(v)
+ for k, v in x.__dict__.items()
+ if not k.startswith("_")
+ }
self.assertEqual(attrs(a), attrs(b))
@@ -97,6 +102,19 @@ class TestFieldForSchema(unittest.TestCase):
fields.Integer(required=False, description="UserId", default=0, missing=0),
)
+ def test_marshmallow_dataclass(self):
+ class NewSchema(Schema):
+ pass
+
+ @dataclass(base_schema=NewSchema)
+ class NewDataclass:
+ pass
+
+ self.assertFieldsEqual(
+ field_for_schema(NewDataclass, metadata=dict(required=False)),
+ fields.Nested(NewDataclass.Schema),
+ )
+
if __name__ == "__main__":
unittest.main()
|
`base_schema` of nested dataclasses is ignored
The base_schema of the nested field is ignored.
## Example:
```py
from marshmallow import Schema, post_dump
from marshmallow_dataclass import dataclass
class DoubleSchema(Schema):
@post_dump
def double(self, data, **kwargs):
return dict(double=data['number'] * 2)
@dataclass(base_schema=DoubleSchema)
class A:
number: int
@dataclass
class B:
a: A
print('is A.Schema correct?', issubclass(A.Schema, DoubleSchema))
print('is B.a.Schema correct?', isinstance(B.Schema._declared_fields['a'].schema, DoubleSchema))
print()
print('expected:')
print(" {'a': {'double': 2}}")
print('actual:')
print(' ', B.Schema().dump(B(A(1))))
```
stdout:
```
is A.Schema correct? True
is B.a.Schema correct? False
expected:
{'a': {'double': 2}}
actual:
{'a': {'number': 1}}
```
## How to fix
```diff
--- marshmallow_dataclass/__init__.py 2019-11-25 14:59:36.146392038 +0200
+++ marshmallow_dataclass/__init__.py.fixed 2019-11-25 15:01:01.947898497 +0200
@@ -422,9 +422,12 @@ def field_for_schema(
+ # Nested marshmallow dataclass
+ nested_schema = getattr(typ, 'Schema', None)
+
# Nested dataclasses
forward_reference = getattr(typ, "__forward_arg__", None)
- nested = forward_reference or class_schema(typ, base_schema=base_schema)
+ nested = nested_schema or forward_reference or class_schema(typ, base_schema=base_schema)
return marshmallow.fields.Nested(nested, **metadata)
```
|
0.0
|
2de0d398d63e2f783867bfded6a9276326effda1
|
[
"tests/test_field_for_schema.py::TestFieldForSchema::test_marshmallow_dataclass"
] |
[
"tests/test_field_for_schema.py::TestFieldForSchema::test_any",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_dict",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_list",
"tests/test_field_for_schema.py::TestFieldForSchema::test_dict_from_typing",
"tests/test_field_for_schema.py::TestFieldForSchema::test_enum",
"tests/test_field_for_schema.py::TestFieldForSchema::test_explicit_field",
"tests/test_field_for_schema.py::TestFieldForSchema::test_int",
"tests/test_field_for_schema.py::TestFieldForSchema::test_newtype",
"tests/test_field_for_schema.py::TestFieldForSchema::test_optional_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_union"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-25 13:51:04+00:00
|
mit
| 3,639 |
|
lovasoa__marshmallow_dataclass-76
|
diff --git a/marshmallow_dataclass/__init__.py b/marshmallow_dataclass/__init__.py
index 67e7d77..403da98 100644
--- a/marshmallow_dataclass/__init__.py
+++ b/marshmallow_dataclass/__init__.py
@@ -34,10 +34,9 @@ Full example::
})
Schema: ClassVar[Type[Schema]] = Schema # For the type checker
"""
-import dataclasses
import inspect
-from functools import lru_cache
from enum import EnumMeta
+from functools import lru_cache
from typing import (
overload,
Dict,
@@ -54,6 +53,7 @@ from typing import (
Set,
)
+import dataclasses
import marshmallow
import typing_inspect
@@ -457,12 +457,17 @@ def _base_schema(
Base schema factory that creates a schema for `clazz` derived either from `base_schema`
or `BaseSchema`
"""
+
# Remove `type: ignore` when mypy handles dynamic base classes
# https://github.com/python/mypy/issues/2813
class BaseSchema(base_schema or marshmallow.Schema): # type: ignore
- @marshmallow.post_load
- def make_data_class(self, data, **_):
- return clazz(**data)
+ def load(self, data: Mapping, *, many: bool = None, **kwargs):
+ all_loaded = super().load(data, many=many, **kwargs)
+ many = self.many if many is None else bool(many)
+ if many:
+ return [clazz(**loaded) for loaded in all_loaded]
+ else:
+ return clazz(**all_loaded)
return BaseSchema
|
lovasoa/marshmallow_dataclass
|
69caec49402adb07149ff87700ef52eeeeba0066
|
diff --git a/tests/test_post_load.py b/tests/test_post_load.py
new file mode 100644
index 0000000..2e53c2c
--- /dev/null
+++ b/tests/test_post_load.py
@@ -0,0 +1,44 @@
+import unittest
+
+import marshmallow
+
+import marshmallow_dataclass
+
+
+# Regression test for https://github.com/lovasoa/marshmallow_dataclass/issues/75
+class TestPostLoad(unittest.TestCase):
+ @marshmallow_dataclass.dataclass
+ class Named:
+ first_name: str
+ last_name: str
+
+ @marshmallow.post_load
+ def a(self, data, **_kwargs):
+ data["first_name"] = data["first_name"].capitalize()
+ return data
+
+ @marshmallow.post_load
+ def z(self, data, **_kwargs):
+ data["last_name"] = data["last_name"].capitalize()
+ return data
+
+ def test_post_load_method_naming_does_not_affect_data(self):
+ actual = self.Named.Schema().load(
+ {"first_name": "matt", "last_name": "groening"}
+ )
+ expected = self.Named(first_name="Matt", last_name="Groening")
+ self.assertEqual(actual, expected)
+
+ def test_load_many(self):
+ actual = self.Named.Schema().load(
+ [
+ {"first_name": "matt", "last_name": "groening"},
+ {"first_name": "bart", "last_name": "simpson"},
+ ],
+ many=True,
+ )
+ expected = [
+ self.Named(first_name="Matt", last_name="Groening"),
+ self.Named(first_name="Bart", last_name="Simpson"),
+ ]
+ self.assertEqual(actual, expected)
|
Behavior of marshmallow.post_load is dependent on function name
I discovered a discrepancy in how the `post_load` decorator behaves within a marshmallow dataclass. In [Schema._invoke_processors](https://github.com/marshmallow-code/marshmallow/blob/76196abf35ff9ec58f3dc2377ea7a8a9bf23712a/src/marshmallow/schema.py#L1192), the post-load `make_data_class` processor runs before or after the post_load defined in the dataclass, depending on whichever comes first alphabetically. The examples below illustrate this:
```python
import marshmallow
import marshmallow_dataclass
@marshmallow_dataclass.dataclass
class Person:
name: str
@marshmallow.post_load
def a(schema, data, **kwargs):
data.name = data.name.capitalize() # works if: data['name'] = data['name'].capitalize()
return data
Person.Schema().load({'name': 'matt'})
>> AttributeError: 'dict' object has no attribute 'name'
```
```python
import marshmallow
import marshmallow_dataclass
@marshmallow_dataclass.dataclass
class Person:
name: str
@marshmallow.post_load
def z(schema, data, **kwargs):
data.name = data.name.capitalize()
return data
Person.Schema().load({'name': 'matt'})
>> Person(name='Matt')
```
The post_load in the first example is invoked before an instance of the dataclass has been created, which makes it feel more like a `pre_load`. The validation checks still seem to work properly, but it is a little odd to work with. I know the documentation does not cover pre/post loading, but I figured I'd document this in case anyone else runs into the same issue.
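Until the ordering is fixed, one workaround (a sketch relying on the current alphabetical invocation order) is to name the hook so it sorts after `make_data_class`:
```python
import marshmallow
import marshmallow_dataclass

@marshmallow_dataclass.dataclass
class Person:
    name: str

    # "z_capitalize" sorts after "make_data_class", so `data` is already
    # a Person instance by the time this hook runs.
    @marshmallow.post_load
    def z_capitalize(self, data, **kwargs):
        data.name = data.name.capitalize()
        return data

assert Person.Schema().load({"name": "matt"}) == Person(name="Matt")
```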
|
0.0
|
69caec49402adb07149ff87700ef52eeeeba0066
|
[
"tests/test_post_load.py::TestPostLoad::test_load_many",
"tests/test_post_load.py::TestPostLoad::test_post_load_method_naming_does_not_affect_data"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-19 23:17:43+00:00
|
mit
| 3,640 |
|
lovasoa__marshmallow_dataclass-81
|
diff --git a/marshmallow_dataclass/__init__.py b/marshmallow_dataclass/__init__.py
index 403da98..9b223a5 100644
--- a/marshmallow_dataclass/__init__.py
+++ b/marshmallow_dataclass/__init__.py
@@ -319,12 +319,9 @@ def _proxied_class_schema(
def _field_by_type(
typ: Union[type, Any], base_schema: Optional[Type[marshmallow.Schema]]
) -> Optional[Type[marshmallow.fields.Field]]:
- if typ is Any:
- return marshmallow.fields.Raw
- else:
- return (
- base_schema and base_schema.TYPE_MAPPING.get(typ)
- ) or marshmallow.Schema.TYPE_MAPPING.get(typ)
+ return (
+ base_schema and base_schema.TYPE_MAPPING.get(typ)
+ ) or marshmallow.Schema.TYPE_MAPPING.get(typ)
def field_for_schema(
@@ -378,6 +375,10 @@ def field_for_schema(
if field:
return field(**metadata)
+ if typ is Any:
+ metadata.setdefault("allow_none", True)
+ return marshmallow.fields.Raw(**metadata)
+
# Generic types
origin = typing_inspect.get_origin(typ)
if origin:
|
lovasoa/marshmallow_dataclass
|
bfda341de52c5ca4e0c5898c644298ff39720254
|
diff --git a/tests/test_class_schema.py b/tests/test_class_schema.py
index 5dd6958..094ce37 100644
--- a/tests/test_class_schema.py
+++ b/tests/test_class_schema.py
@@ -1,11 +1,13 @@
-import dataclasses
import unittest
+from typing import Any
from uuid import UUID
-from marshmallow_dataclass import class_schema
-from marshmallow import Schema
+import dataclasses
+from marshmallow import Schema, ValidationError
from marshmallow.fields import Field, UUID as UUIDField
+from marshmallow_dataclass import class_schema
+
class TestClassSchema(unittest.TestCase):
def test_simple_unique_schemas(self):
@@ -58,6 +60,24 @@ class TestClassSchema(unittest.TestCase):
self.assertIsInstance(schema.fields["custom"], CustomField)
self.assertIsInstance(schema.fields["uuid"], UUIDField)
+ def test_any_none(self):
+ # See: https://github.com/lovasoa/marshmallow_dataclass/issues/80
+ @dataclasses.dataclass
+ class A:
+ data: Any
+
+ schema = class_schema(A)()
+ self.assertEqual(A(data=None), schema.load({"data": None}))
+ self.assertEqual(schema.dump(A(data=None)), {"data": None})
+
+ def test_any_none_disallowed(self):
+ @dataclasses.dataclass
+ class A:
+ data: Any = dataclasses.field(metadata={"allow_none": False})
+
+ schema = class_schema(A)()
+ self.assertRaises(ValidationError, lambda: schema.load({"data": None}))
+
if __name__ == "__main__":
unittest.main()
diff --git a/tests/test_field_for_schema.py b/tests/test_field_for_schema.py
index 8f356e9..c597e87 100644
--- a/tests/test_field_for_schema.py
+++ b/tests/test_field_for_schema.py
@@ -29,7 +29,9 @@ class TestFieldForSchema(unittest.TestCase):
)
def test_any(self):
- self.assertFieldsEqual(field_for_schema(Any), fields.Raw(required=True))
+ self.assertFieldsEqual(
+ field_for_schema(Any), fields.Raw(required=True, allow_none=True)
+ )
def test_dict_from_typing(self):
self.assertFieldsEqual(
@@ -45,8 +47,8 @@ class TestFieldForSchema(unittest.TestCase):
self.assertFieldsEqual(
field_for_schema(dict),
fields.Dict(
- keys=fields.Raw(required=True),
- values=fields.Raw(required=True),
+ keys=fields.Raw(required=True, allow_none=True),
+ values=fields.Raw(required=True, allow_none=True),
required=True,
),
)
@@ -54,7 +56,7 @@ class TestFieldForSchema(unittest.TestCase):
def test_builtin_list(self):
self.assertFieldsEqual(
field_for_schema(list, metadata=dict(required=False)),
- fields.List(fields.Raw(required=True), required=False),
+ fields.List(fields.Raw(required=True, allow_none=True), required=False),
)
def test_explicit_field(self):
|
Bug: null values not allowed in `Any` fields
```py
from typing import Any
from typing import Dict
from marshmallow_dataclass import dataclass
@dataclass()
class A:
data: Dict[str, Any]
A.Schema().load({
'data': {'inner_data': None}
})
```
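For reference, the fix keeps `None` opt-out per field; a sketch based on the accompanying regression tests:
```py
import dataclasses
from typing import Any

from marshmallow import ValidationError
from marshmallow_dataclass import class_schema

@dataclasses.dataclass
class B:
    data: Any = dataclasses.field(metadata={"allow_none": False})

schema = class_schema(B)()
try:
    schema.load({"data": None})
except ValidationError:
    print("None rejected when allow_none=False")
```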
|
0.0
|
bfda341de52c5ca4e0c5898c644298ff39720254
|
[
"tests/test_class_schema.py::TestClassSchema::test_any_none",
"tests/test_field_for_schema.py::TestFieldForSchema::test_any",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_dict",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_list"
] |
[
"tests/test_class_schema.py::TestClassSchema::test_any_none_disallowed",
"tests/test_class_schema.py::TestClassSchema::test_simple_unique_schemas",
"tests/test_class_schema.py::TestClassSchema::test_use_type_mapping_from_base_schema",
"tests/test_field_for_schema.py::TestFieldForSchema::test_dict_from_typing",
"tests/test_field_for_schema.py::TestFieldForSchema::test_enum",
"tests/test_field_for_schema.py::TestFieldForSchema::test_explicit_field",
"tests/test_field_for_schema.py::TestFieldForSchema::test_int",
"tests/test_field_for_schema.py::TestFieldForSchema::test_marshmallow_dataclass",
"tests/test_field_for_schema.py::TestFieldForSchema::test_newtype",
"tests/test_field_for_schema.py::TestFieldForSchema::test_optional_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_union"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-02 10:54:57+00:00
|
mit
| 3,641 |
|
lovasoa__marshmallow_dataclass-85
|
diff --git a/marshmallow_dataclass/__init__.py b/marshmallow_dataclass/__init__.py
index 9b223a5..d6f1554 100644
--- a/marshmallow_dataclass/__init__.py
+++ b/marshmallow_dataclass/__init__.py
@@ -351,6 +351,7 @@ def field_for_schema(
"""
metadata = {} if metadata is None else dict(metadata)
+
if default is not marshmallow.missing:
metadata.setdefault("default", default)
# 'missing' must not be set for required fields.
@@ -383,16 +384,22 @@ def field_for_schema(
origin = typing_inspect.get_origin(typ)
if origin:
arguments = typing_inspect.get_args(typ, True)
+ # Override base_schema.TYPE_MAPPING to change the class used for generic types below
+ type_mapping = base_schema.TYPE_MAPPING if base_schema else {}
+
if origin in (list, List):
child_type = field_for_schema(arguments[0], base_schema=base_schema)
- return marshmallow.fields.List(child_type, **metadata)
+ list_type = type_mapping.get(List, marshmallow.fields.List)
+ return list_type(child_type, **metadata)
if origin in (tuple, Tuple):
children = tuple(
field_for_schema(arg, base_schema=base_schema) for arg in arguments
)
- return marshmallow.fields.Tuple(children, **metadata)
+ tuple_type = type_mapping.get(Tuple, marshmallow.fields.Tuple)
+ return tuple_type(children, **metadata)
elif origin in (dict, Dict):
- return marshmallow.fields.Dict(
+ dict_type = type_mapping.get(Dict, marshmallow.fields.Dict)
+ return dict_type(
keys=field_for_schema(arguments[0], base_schema=base_schema),
values=field_for_schema(arguments[1], base_schema=base_schema),
**metadata,
|
lovasoa/marshmallow_dataclass
|
c57a440198de020073c5291e872bdb08ce247292
|
diff --git a/tests/test_field_for_schema.py b/tests/test_field_for_schema.py
index c597e87..699bd55 100644
--- a/tests/test_field_for_schema.py
+++ b/tests/test_field_for_schema.py
@@ -2,7 +2,7 @@ import inspect
import typing
import unittest
from enum import Enum
-from typing import Dict, Optional, Union, Any
+from typing import Dict, Optional, Union, Any, List, Tuple
from marshmallow import fields, Schema
@@ -117,6 +117,26 @@ class TestFieldForSchema(unittest.TestCase):
fields.Nested(NewDataclass.Schema),
)
+ def test_override_container_type_with_type_mapping(self):
+ type_mapping = [
+ (List, fields.List, List[int]),
+ (Dict, fields.Dict, Dict[str, int]),
+ (Tuple, fields.Tuple, Tuple[int, str, bytes]),
+ ]
+ for base_type, marshmallow_field, schema in type_mapping:
+
+ class MyType(marshmallow_field):
+ ...
+
+ self.assertIsInstance(field_for_schema(schema), marshmallow_field)
+
+ class BaseSchema(Schema):
+ TYPE_MAPPING = {base_type: MyType}
+
+ self.assertIsInstance(
+ field_for_schema(schema, base_schema=BaseSchema), MyType
+ )
+
if __name__ == "__main__":
unittest.main()
|
Use NewType for derived List class
Hi there!
Coming from vanilla marshmallow, I had a `List` class that derived from `marshmallow.fields.List` and added some logic to how the input data was read in `_deserialize`.
Now, I am trying to integrate this with `marshmallow_dataclass` and hence create a new type. I have created a few other types that were not lists and it worked well, but I can't get it to work for lists. Here is what I have tried so far:
```python3
import typing
import marshmallow
from marshmallow_dataclass import dataclass, NewType
class MyList(marshmallow.fields.List):
"""A list field type that properly handles lists in MultiDict"""
def _deserialize(self, value, attr, data, **kwargs):
# removed the code, just checking if this is called
raise Exception("Finally Called")
ListType = NewType("ListType", typing.List, field=MyList)
@dataclass
class TestDataClass1:
"""
This is the one I was expecting would work, as I thought that it would pass
`int` to `MyList` which would come back to something similar to vanilla
marshmallow i.e. `values = MyList(Int())`
"""
values: ListType(int)
# fails with 'Not a valid integer.'
TestDataClass1.Schema().load({"values": [1,2,3]})
# Second try!
@dataclass
class TestDataClass2:
values: ListType(typing.List[int])
# list is properly loaded, but that means MyList._deserialize was not called
TestDataClass2.Schema().load({"values": [1,2,3]})
```
I looked in the doc and example but could not find a reference for this use case, could you help me figure it out?
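With the patch above merged, the supported route (sketched from the accompanying tests) is to register the custom field in a base schema's `TYPE_MAPPING` and pass that schema as `base_schema`:
```python3
import dataclasses
import typing

import marshmallow
from marshmallow_dataclass import class_schema

class MyList(marshmallow.fields.List):
    def _deserialize(self, value, attr, data, **kwargs):
        raise Exception("Finally Called")

class BaseSchema(marshmallow.Schema):
    TYPE_MAPPING = {typing.List: MyList}

@dataclasses.dataclass
class TestDataClass3:
    values: typing.List[int]

schema = class_schema(TestDataClass3, base_schema=BaseSchema)()
schema.load({"values": [1, 2, 3]})  # raises Exception("Finally Called")
```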
|
0.0
|
c57a440198de020073c5291e872bdb08ce247292
|
[
"tests/test_field_for_schema.py::TestFieldForSchema::test_override_container_type_with_type_mapping"
] |
[
"tests/test_field_for_schema.py::TestFieldForSchema::test_any",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_dict",
"tests/test_field_for_schema.py::TestFieldForSchema::test_builtin_list",
"tests/test_field_for_schema.py::TestFieldForSchema::test_dict_from_typing",
"tests/test_field_for_schema.py::TestFieldForSchema::test_enum",
"tests/test_field_for_schema.py::TestFieldForSchema::test_explicit_field",
"tests/test_field_for_schema.py::TestFieldForSchema::test_int",
"tests/test_field_for_schema.py::TestFieldForSchema::test_marshmallow_dataclass",
"tests/test_field_for_schema.py::TestFieldForSchema::test_newtype",
"tests/test_field_for_schema.py::TestFieldForSchema::test_optional_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_str",
"tests/test_field_for_schema.py::TestFieldForSchema::test_union"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-05-13 08:25:09+00:00
|
mit
| 3,642 |
|
lubianat__pyorcidator-40
|
diff --git a/src/pyorcidator/helper.py b/src/pyorcidator/helper.py
index 278e5ea..03d15cc 100644
--- a/src/pyorcidator/helper.py
+++ b/src/pyorcidator/helper.py
@@ -5,6 +5,7 @@ Helper functions for pyorcidator
import json
import logging
import re
+from typing import Mapping
import requests
@@ -18,15 +19,30 @@ EXTERNAL_ID_PROPERTIES = {
"Loop profile": "P2798",
"Scopus Author ID": "P1153",
"ResearcherID": "P1053",
+ "github": "P2037",
+ "twitter": "P2002",
+ "scopus": "P1153",
}
-
-
-def get_external_ids(data):
- id_list = data["person"]["external-identifiers"]["external-identifier"]
- id_dict = {}
- for id in id_list:
- id_dict[id["external-id-type"]] = id["external-id-value"]
- return id_dict
+PREFIXES = [
+ ("github", "https://github.com/"),
+ ("twitter", "https://twitter.com/"),
+ ("scopus", "https://www.scopus.com/authid/detail.uri?authorId=")
+ # TODO linkedin, figshare, researchgate, publons, semion, semantic scholar, google scholar, etc.
+]
+
+
+def get_external_ids(data) -> Mapping[str, str]:
+ """Get external identifiers that can be mapped to Wikidata properties."""
+ rv = {}
+ for d in data["person"]["external-identifiers"]["external-identifier"]:
+ rv[d["external-id-type"]] = d["external-id-value"]
+ for d in data["person"]["researcher-urls"].get("researcher-url", []):
+ # url_name = d["url-name"].lower().replace(" ", "")
+ url = d["url"]["value"].rstrip("/")
+ for key, url_prefix in PREFIXES:
+ if url.startswith(url_prefix):
+ rv[key] = url[len(url_prefix):]
+ return rv
def render_orcid_qs(orcid):
|
lubianat/pyorcidator
|
990e7e4b0926bf68b53af5f13f57aa5224bddc96
|
diff --git a/tests/conftest.py b/tests/conftest.py
index fce1233..ebf0613 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,6 +3,8 @@ from pathlib import Path
import pytest
+from pyorcidator.helper import get_external_ids, get_orcid_data
+
@pytest.fixture
def sample_orcid_data():
@@ -16,3 +18,12 @@ def wikidata_api_result():
sample_path = Path(__file__).parent.joinpath("wikidata_api_result.json")
return json.loads(sample_path.read_text())
+
+
[email protected]
+def orcid_w_external_links():
+ orcid = "0000-0002-0791-1347" # Selja Seppälä
+ data = get_orcid_data(orcid)
+ ids = get_external_ids(data)
+
+ return ids
diff --git a/tests/test_helper.py b/tests/test_helper.py
index 77e5caa..1f63d81 100644
--- a/tests/test_helper.py
+++ b/tests/test_helper.py
@@ -4,10 +4,11 @@ Tests for the helper module
from pyorcidator.helper import (
get_date,
+ get_external_ids,
+ get_orcid_data,
get_organization_list,
get_paper_dois,
lookup_id,
- get_external_ids,
render_orcid_qs,
)
@@ -57,3 +58,21 @@ def test_get_loop_id(sample_orcid_data):
def test_render_orcid_runs():
render_orcid_qs("0000-0003-4423-4370")
+
+
+def test_get_github(orcid_w_external_links):
+ """Test getting a github link."""
+ assert orcid_w_external_links["github"] == "seljaseppala"
+
+
+def test_get_twitter():
+ """Test getting a twitter link."""
+ orcid = "0000-0001-7542-0286" # Egon Willighagen
+ data = get_orcid_data(orcid)
+ ids = get_external_ids(data)
+ assert ids["twitter"] == "egonwillighagen"
+
+
+def test_get_scopus(orcid_w_external_links):
+ """Test getting a scopus ID."""
+ assert orcid_w_external_links["Scopus Author ID"] == "56352777000"
|
Parse websites and social links
e.g. see https://orcid.org/0000-0002-5292-4083
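The patch resolves this by prefix-matching the ORCID researcher URLs; the core idea, as a standalone sketch:
```py
# Standalone sketch of the prefix matching used in the patch.
PREFIXES = [
    ("github", "https://github.com/"),
    ("twitter", "https://twitter.com/"),
    ("scopus", "https://www.scopus.com/authid/detail.uri?authorId="),
]

def match_researcher_url(url: str):
    url = url.rstrip("/")
    for key, prefix in PREFIXES:
        if url.startswith(prefix):
            return key, url[len(prefix):]
    return None

assert match_researcher_url("https://github.com/seljaseppala/") == (
    "github",
    "seljaseppala",
)
```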
|
0.0
|
990e7e4b0926bf68b53af5f13f57aa5224bddc96
|
[
"tests/test_helper.py::test_get_github"
] |
[
"tests/test_helper.py::test_lookup_id",
"tests/test_helper.py::test_get_date",
"tests/test_helper.py::test_get_paper_dois",
"tests/test_helper.py::test_get_org_list",
"tests/test_helper.py::test_get_loop_id",
"tests/test_helper.py::test_render_orcid_runs",
"tests/test_helper.py::test_get_scopus"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-11 23:35:00+00:00
|
mit
| 3,643 |
|
lucianopaz__compress_pickle-28
|
diff --git a/compress_pickle/__init__.py b/compress_pickle/__init__.py
index a9221a9..1471ef4 100644
--- a/compress_pickle/__init__.py
+++ b/compress_pickle/__init__.py
@@ -4,4 +4,4 @@ from . import compressers
from . import picklers
from . import io
-__version__ = "2.0.0"
+__version__ = "2.0.1"
diff --git a/compress_pickle/utils.py b/compress_pickle/utils.py
index f172d63..4e226ad 100644
--- a/compress_pickle/utils.py
+++ b/compress_pickle/utils.py
@@ -135,4 +135,4 @@ def _infer_compression_from_path(path: PathType) -> Optional[str]:
def _set_default_extension(path: PathType, compression: Optional[str]) -> str:
root, current_ext = splitext(_stringyfy_path(path))
- return root + get_default_compression_mapping()[compression]
+ return root + "." + get_default_compression_mapping()[compression]
|
lucianopaz/compress_pickle
|
99ce03df097018870f502823f814db2d578cd0e5
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 8b54c9e..6ec6fcc 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,6 +1,12 @@
import pytest
import io
-from compress_pickle.utils import _stringyfy_path, _infer_compression_from_path
+from os.path import splitext
+from compress_pickle.compressers.registry import get_default_compression_mapping
+from compress_pickle.utils import (
+ _stringyfy_path,
+ _infer_compression_from_path,
+ _set_default_extension,
+)
def test_stringify_path():
@@ -10,6 +16,17 @@ def test_stringify_path():
_stringyfy_path({"a"})
[email protected]("compressions")
+def test_set_default_extension(compressions):
+ root = "somepath.someotherstuff"
+ path = root + ".ext"
+ new_path = _set_default_extension(path, compression=compressions)
+ assert splitext(new_path) == (
+ root,
+ "." + get_default_compression_mapping()[compressions],
+ )
+
+
@pytest.mark.usefixtures("valid_extensions")
def test_infer_compression_from_path(valid_extensions):
extension, compression = valid_extensions
|
File ending with `.pkl.gz` is stored as `.pklgz`
Hello @lucianopaz,
Thank you for making this library!
I wanted to ask why, if the provided path is `.pkl.gz`, the files are stored as `.pklgz`.
This feels like undesired default behaviour that should, if needed, be opt-in behind a flag, as it breaks libraries that depend on files being stored at exactly the given path.
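To make the expected behaviour concrete (a sketch using the internal helper; it assumes the default mapping gives gzip the `gz` extension):
```py
from compress_pickle.utils import _set_default_extension

# With the fix, the default extension is joined with a "." separator,
# so the result is "data.gz" rather than "datagz".
assert _set_default_extension("data.pkl", compression="gzip") == "data.gz"
```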
Ciao e Grazie,
Luca
|
0.0
|
99ce03df097018870f502823f814db2d578cd0e5
|
[
"tests/test_utils.py::test_set_default_extension[None]",
"tests/test_utils.py::test_set_default_extension[pickle]",
"tests/test_utils.py::test_set_default_extension[gzip]",
"tests/test_utils.py::test_set_default_extension[bz2]",
"tests/test_utils.py::test_set_default_extension[lzma]",
"tests/test_utils.py::test_set_default_extension[zipfile]",
"tests/test_utils.py::test_set_default_extension[lz4]"
] |
[
"tests/test_utils.py::test_stringify_path",
"tests/test_utils.py::test_infer_compression_from_path[('pkl',",
"tests/test_utils.py::test_infer_compression_from_path[('pickle',",
"tests/test_utils.py::test_infer_compression_from_path[('gz',",
"tests/test_utils.py::test_infer_compression_from_path[('bz',",
"tests/test_utils.py::test_infer_compression_from_path[('bz2',",
"tests/test_utils.py::test_infer_compression_from_path[('lzma',",
"tests/test_utils.py::test_infer_compression_from_path[('xz',",
"tests/test_utils.py::test_infer_compression_from_path[('zip',",
"tests/test_utils.py::test_infer_compression_from_path[('lz4',",
"tests/test_utils.py::test_infer_compression_from_path_unknown[]",
"tests/test_utils.py::test_infer_compression_from_path_unknown[unknown]",
"tests/test_utils.py::test_infer_compression_from_path_io_type"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-15 15:02:02+00:00
|
mit
| 3,644 |
|
lucianopaz__compress_pickle-35
|
diff --git a/compress_pickle/utils.py b/compress_pickle/utils.py
index 4e226ad..a5284b1 100644
--- a/compress_pickle/utils.py
+++ b/compress_pickle/utils.py
@@ -105,10 +105,20 @@ def instantiate_compresser(
BaseCompresser
The compresser instance that will be used to create the byte stream from which a
:class:`compress_pickle.picklers.base.BasePicklerIO` will read or write serialized objects.
+
+ Raises
+ ------
+ TypeError
+ If the supplied ``path`` is not a ``PATH_TYPES`` instance and the ``compression`` is "infer".
"""
if isinstance(path, PATH_TYPES):
_path = _stringyfy_path(path)
if compression == "infer":
+ if not isinstance(path, PATH_TYPES):
+ raise TypeError(
+ f"Cannot infer the compression from a path that is not an instance of "
+ f"{PATH_TYPES}. Encountered {type(path)}"
+ )
compression = _infer_compression_from_path(_path)
compresser_class = get_compresser(compression)
if set_default_extension and isinstance(path, PATH_TYPES):
|
lucianopaz/compress_pickle
|
a15a6e88b42fd52c0ec84c19303808b2a7f1b8f6
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 6ec6fcc..2eeb45e 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -6,6 +6,7 @@ from compress_pickle.utils import (
_stringyfy_path,
_infer_compression_from_path,
_set_default_extension,
+ instantiate_compresser,
)
@@ -54,6 +55,15 @@ def test_infer_compression_from_path_io_type():
_infer_compression_from_path(path)
+def test_instantiate_compresser_cannot_infer_compression():
+ with pytest.raises(
+ TypeError,
+ match="Cannot infer the compression from a path that is not an instance of ",
+ ):
+ with io.BytesIO() as path:
+ instantiate_compresser(compression="infer", path=path, mode="rb")
+
+
# def test_known_compressions():
# kcn = get_known_compressions()
# assert all((cn in kcn for cn in COMPRESSION_NAMES))
|
Support `with open` context manager
Currently, the following snippet does not infer the compression scheme properly and errors:
```python
import compress_pickle as cpickle
with open('test.lzma', 'wb') as f: # Same issue for other extensions
cpickle.dump([1, 2, 3], f)
```
Which results in the following error:
```
env/lib/python3.8/site-packages/compress_pickle/utils.py in instantiate_compresser(compression, path, mode, set_default_extension, **kwargs)
110 _path = _stringyfy_path(path)
111 if compression == "infer":
--> 112 compression = _infer_compression_from_path(_path)
113 compresser_class = get_compresser(compression)
114 if set_default_extension and isinstance(path, PATH_TYPES):
UnboundLocalError: local variable '_path' referenced before assignment
```
While the docstring says "a file-like object (``io.BaseIO`` instances) [...] will be passed to the `BaseCompresser` class", this does not happen. I'd suggest grabbing the filename like so:
```python
if isinstance(path, PATH_TYPES):
_path = _stringyfy_path(path)
elif isinstance(path, io.IOBase):
_path = path.name # this would set _path to 'test.lzma' in the above example
else:
raise RuntimeError("Unrecognized path")
if compression == "infer":
compression = _infer_compression_from_path(_path)
```
The error would simply ensure that the variable `_path` is defined. Something more descriptive could be added.
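For reference, the merged fix takes a slightly different route and raises a descriptive `TypeError` instead of reading `path.name`; a sketch of the resulting behaviour:
```python
import io

import compress_pickle as cpickle

# After the fix, a raw byte stream with compression="infer" fails fast
# with a descriptive TypeError instead of an UnboundLocalError.
with io.BytesIO() as f:
    try:
        cpickle.dump([1, 2, 3], f, compression="infer")
    except TypeError as exc:
        print(exc)
```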
Any thoughts on this? I don't have time to submit a PR this week, but I'd be happy to at a later time. Thanks for this excellent library!
|
0.0
|
a15a6e88b42fd52c0ec84c19303808b2a7f1b8f6
|
[
"tests/test_utils.py::test_instantiate_compresser_cannot_infer_compression"
] |
[
"tests/test_utils.py::test_stringify_path",
"tests/test_utils.py::test_set_default_extension[None]",
"tests/test_utils.py::test_set_default_extension[pickle]",
"tests/test_utils.py::test_set_default_extension[gzip]",
"tests/test_utils.py::test_set_default_extension[bz2]",
"tests/test_utils.py::test_set_default_extension[lzma]",
"tests/test_utils.py::test_set_default_extension[zipfile]",
"tests/test_utils.py::test_set_default_extension[lz4]",
"tests/test_utils.py::test_infer_compression_from_path[('pkl',",
"tests/test_utils.py::test_infer_compression_from_path[('pickle',",
"tests/test_utils.py::test_infer_compression_from_path[('gz',",
"tests/test_utils.py::test_infer_compression_from_path[('bz',",
"tests/test_utils.py::test_infer_compression_from_path[('bz2',",
"tests/test_utils.py::test_infer_compression_from_path[('lzma',",
"tests/test_utils.py::test_infer_compression_from_path[('xz',",
"tests/test_utils.py::test_infer_compression_from_path[('zip',",
"tests/test_utils.py::test_infer_compression_from_path[('lz4',",
"tests/test_utils.py::test_infer_compression_from_path_unknown[]",
"tests/test_utils.py::test_infer_compression_from_path_unknown[unknown]",
"tests/test_utils.py::test_infer_compression_from_path_io_type"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-21 16:53:52+00:00
|
mit
| 3,645 |
|
lucidsushi__gs_pip_install-132
|
diff --git a/gs_pip_install/gs_pip_install.py b/gs_pip_install/gs_pip_install.py
index 1313713..3bc6d66 100644
--- a/gs_pip_install/gs_pip_install.py
+++ b/gs_pip_install/gs_pip_install.py
@@ -76,7 +76,7 @@ def install_packages(
package, *_ = gs_package_zip_file.split('.')
install_path = f"{packages_download_dir}/{gs_package_zip_file}"
if extras.get(package):
- install_path = f"{install_path}[{extras[package]}]"
+ install_path = f"{install_path}{extras[package]}"
if not target_dir:
install_command = [
|
lucidsushi/gs_pip_install
|
09668ce1fe393dc338b5b3f3181f76bd44891e74
|
diff --git a/tests/test_gs_pip_install.py b/tests/test_gs_pip_install.py
index c468e7a..a03a95f 100644
--- a/tests/test_gs_pip_install.py
+++ b/tests/test_gs_pip_install.py
@@ -147,7 +147,7 @@ class TestInstall(unittest.TestCase):
mock_list_dir.return_value = ['some_package.tar.gz']
gs_pip_install.install_packages(
packages_download_dir='some_download_dest',
- extras={'some_package': 'extra_a, extra_b'},
+ extras={'some_package': '[extra_a, extra_b]'},
)
mock_subprocess.assert_called_once_with(
[
@@ -160,3 +160,13 @@ class TestInstall(unittest.TestCase):
"some_download_dest/some_package.tar.gz[extra_a, extra_b]",
]
)
+
+ def test_strip_extras(self):
+
+ # fmt: off
+ assert gs_pip_install._strip_extras(
+ 'some_package.tar.gz[extra_a, extra_b]'
+ ) == (
+ 'some_package.tar.gz', '[extra_a, extra_b]'
+ )
+ # fmt: on
|
Path error when specifying extras
https://github.com/lucidsushi/gs_pip_install/blob/09668ce1fe393dc338b5b3f3181f76bd44891e74/gs_pip_install/gs_pip_install.py#L79
Currently the extras brackets are duplicated, which raises:
ERROR: Invalid requirement: 'gcs_packages/geotab_data_factory.tar.gz[[pipelines]]'
Hint: It looks like a path. File 'gcs_packages/geotab_data_factory.tar.gz[[pipelines]]' does not exist.
Replace with `install_path = f"{install_path}{extras[package]}"`
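The extras value already includes its brackets, as the pip-derived `_strip_extras` helper shows; a sketch:
```python
import re

# Sketch of pip's _strip_extras helper adopted by the project: the extras
# group keeps its surrounding brackets, so they must not be re-added.
def _strip_extras(path):
    m = re.match(r"^(.+)(\[[^\]]+\])$", path)
    if m:
        return m.group(1), m.group(2)
    return path, None

assert _strip_extras("some_package.tar.gz[extra_a, extra_b]") == (
    "some_package.tar.gz",
    "[extra_a, extra_b]",
)
```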
|
0.0
|
09668ce1fe393dc338b5b3f3181f76bd44891e74
|
[
"tests/test_gs_pip_install.py::TestInstall::test_install_packages_with_extras"
] |
[
"tests/test_gs_pip_install.py::TestInstall::test_cli_package_name",
"tests/test_gs_pip_install.py::TestInstall::test_cli_req_file",
"tests/test_gs_pip_install.py::TestInstall::test_download_packages",
"tests/test_gs_pip_install.py::TestInstall::test_install_packages_no_target_dir",
"tests/test_gs_pip_install.py::TestInstall::test_install_packages_with_target_dir",
"tests/test_gs_pip_install.py::TestInstall::test_strip_extras"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-09 03:36:55+00:00
|
mit
| 3,646 |
|
lucidsushi__gs_pip_install-135
|
diff --git a/gs_pip_install/gs_pip_install.py b/gs_pip_install/gs_pip_install.py
index c17a315..fab8ce9 100644
--- a/gs_pip_install/gs_pip_install.py
+++ b/gs_pip_install/gs_pip_install.py
@@ -13,9 +13,7 @@ from google.cloud import storage
@click.command()
@click.option('-b', "--bucket_name", help="(str) Name of GCS bucket")
@click.option(
- '-r',
- "--requirement",
- help="(str) Name of Python package or requirements file",
+ '-r', "--requirement", help="(str) Name of Python package or requirements file",
)
@click.option(
'-d',
@@ -80,10 +78,9 @@ def install_packages(
if extras.get(package):
install_path = f"{install_path}{extras[package]}"
- shims_python = f"{os.environ['HOME']}/.pyenv/shims/python"
if not target_dir:
install_command = [
- shims_python,
+ sys.executable,
"-m",
"pip",
"install",
@@ -93,7 +90,7 @@ def install_packages(
]
else:
install_command = [
- shims_python,
+ sys.executable,
"-m",
"pip",
"install",
@@ -108,15 +105,15 @@ def install_packages(
subprocess.check_output(install_command)
except Exception as e:
logging.error(f"install failed using: {install_command}")
- logging.warning(f"{e}\nAttempting pip install with sys.executable\n")
- install_command[0] = sys.executable
+ logging.warning(f"Attempting pip install with pyenv python:\n {e}")
+ install_command[0] = f"{os.environ['HOME']}/.pyenv/shims/python"
subprocess.check_output(install_command)
+ except Exception as e:
+ logging.error(f"install failed using: {install_command}")
def download_packages(
- packages_download_dir: str,
- bucket_name: str,
- package_list: List[str],
+ packages_download_dir: str, bucket_name: str, package_list: List[str],
):
"""Download Python packages from GCS into a local directory.
@@ -148,10 +145,10 @@ def download_packages(
def _strip_extras(path: str) -> Tuple[str, Optional[str]]:
"""
- The function splits a package name into package without extras
- and extras.
- Function obtained from PIP Source Code
- https://github.com/pypa/pip/blob/5bc7b33d41546c218e2ae786b02a7d30c2d1723c/src/pip/_internal/req/constructors.py#L42
+ The function splits a package name into package without extras
+ and extras.
+ Function obtained from PIP Source Code
+ https://github.com/pypa/pip/blob/5bc7b33d41546c218e2ae786b02a7d30c2d1723c/src/pip/_internal/req/constructors.py#L42
"""
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
|
lucidsushi/gs_pip_install
|
dd0e90a8897c4df97d6b547570a2072a88996032
|
diff --git a/tests/test_gs_pip_install.py b/tests/test_gs_pip_install.py
index e58ab71..a03a95f 100644
--- a/tests/test_gs_pip_install.py
+++ b/tests/test_gs_pip_install.py
@@ -4,6 +4,7 @@ import unittest
import tempfile
import shutil
import os
+import sys
from unittest import mock
from click.testing import CliRunner
@@ -104,7 +105,7 @@ class TestInstall(unittest.TestCase):
mock_subprocess.call_count == 2
mock_subprocess.assert_any_call(
[
- f"{os.environ['HOME']}/.pyenv/shims/python",
+ sys.executable,
"-m",
"pip",
"install",
@@ -126,7 +127,7 @@ class TestInstall(unittest.TestCase):
mock_subprocess.call_count == 2
mock_subprocess.assert_any_call(
[
- f"{os.environ['HOME']}/.pyenv/shims/python",
+ sys.executable,
"-m",
"pip",
"install",
@@ -150,7 +151,7 @@ class TestInstall(unittest.TestCase):
)
mock_subprocess.assert_called_once_with(
[
- f"{os.environ['HOME']}/.pyenv/shims/python",
+ sys.executable,
"-m",
"pip",
"install",
|
gs_pip_install v0.2.4 installs in incorrect location
* gs_pip_install version: 0.2.4
* Python version: 3.6.8
* Operating System: Linux (Ubuntu)
### Description
When running gs_pip_install, packages are not installed into the correct Python site-packages directory. The site-packages of my virtualenv was `/home/matthewwong/.local/share/virtualenvs/airflow-dag-utilities-W9jupXEt/lib/python3.6/site-packages`, but the package was installed in `/home/matthewwong/.pyenv/versions/3.6.8/lib/python3.6/site-packages`.
I suspect this happens because `gs-pip-install` uses a different Python to run `pip install`:
- gs_pip_install uses: `/home/matthewwong/.pyenv/shims/python`
- current venv uses: `/home/matthewwong/.local/share/virtualenvs/airflow-dag-utilities-W9jupXEt/bin/python`
### What I Did
Use version 0.2.3 instead
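The eventual fix invokes pip through the interpreter that is actually running, which resolves to the active virtualenv; a minimal sketch (the archive path is a placeholder):
```python
import subprocess
import sys

# sys.executable is the Python running this process (e.g. the active
# virtualenv's interpreter), unlike a hard-coded ~/.pyenv/shims/python.
install_path = "gcs_packages/some_package.tar.gz"  # placeholder
subprocess.check_output([sys.executable, "-m", "pip", "install", install_path])
```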
|
0.0
|
dd0e90a8897c4df97d6b547570a2072a88996032
|
[
"tests/test_gs_pip_install.py::TestInstall::test_install_packages_no_target_dir",
"tests/test_gs_pip_install.py::TestInstall::test_install_packages_with_extras",
"tests/test_gs_pip_install.py::TestInstall::test_install_packages_with_target_dir"
] |
[
"tests/test_gs_pip_install.py::TestInstall::test_cli_package_name",
"tests/test_gs_pip_install.py::TestInstall::test_cli_req_file",
"tests/test_gs_pip_install.py::TestInstall::test_download_packages",
"tests/test_gs_pip_install.py::TestInstall::test_strip_extras"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-17 19:22:09+00:00
|
mit
| 3,647 |
|
lundberg__respx-206
|
diff --git a/respx/patterns.py b/respx/patterns.py
index 37ab1d4..59a9e4d 100644
--- a/respx/patterns.py
+++ b/respx/patterns.py
@@ -504,9 +504,6 @@ def M(*patterns: Pattern, **lookups: Any) -> Optional[Pattern]:
extras = None
for pattern__lookup, value in lookups.items():
- if not value:
- continue
-
# Handle url pattern
if pattern__lookup == "url":
extras = parse_url_patterns(value)
@@ -535,6 +532,10 @@ def M(*patterns: Pattern, **lookups: Any) -> Optional[Pattern]:
lookup = Lookup(lookup_name) if lookup_name else None
pattern = P(value, lookup=lookup)
+ # Skip patterns with no value, exept when using equal lookup
+ if not pattern.value and pattern.lookup is not Lookup.EQUAL:
+ continue
+
patterns += (pattern,)
# Combine and merge patterns
|
lundberg/respx
|
019a8a839193af265b15c92a2b7f8dec67c6cb65
|
diff --git a/tests/test_patterns.py b/tests/test_patterns.py
index 0f06293..f90c561 100644
--- a/tests/test_patterns.py
+++ b/tests/test_patterns.py
@@ -66,6 +66,19 @@ def test_match_context():
assert match.context == {"host": "foo.bar", "slug": "baz"}
[email protected](
+ "kwargs,url,expected",
+ [
+ ({"params__eq": {}}, "https://foo.bar/", True),
+ ({"params__eq": {}}, "https://foo.bar/?x=y", False),
+ ({"params__contains": {}}, "https://foo.bar/?x=y", True),
+ ],
+)
+def test_m_pattern(kwargs, url, expected):
+ request = httpx.Request("GET", url)
+ assert bool(M(host="foo.bar", **kwargs).match(request)) is expected
+
+
@pytest.mark.parametrize(
"lookup,value,expected",
[
@@ -217,6 +230,8 @@ def test_path_pattern():
(Lookup.EQUAL, "y=2", "https://foo.bar/?x=1", False),
(Lookup.EQUAL, {"x": ANY}, "https://foo.bar/?x=1", True),
(Lookup.EQUAL, {"y": ANY}, "https://foo.bar/?x=1", False),
+ (Lookup.EQUAL, {}, "https://foo.bar/?x=1", False),
+ (Lookup.EQUAL, {}, "https://foo.bar/", True),
(Lookup.EQUAL, "x=1&y=2", "https://foo.bar/?x=1", False),
(Lookup.EQUAL, "y=2&x=1", "https://foo.bar/?x=1&y=2", True),
(Lookup.EQUAL, "y=3&x=2&x=1", "https://foo.bar/?x=1&x=2&y=3", False), # ordered
|
Cannot create a lookup to match empty params
https://github.com/lundberg/respx/blob/c76f632690cdfb2c878fe8fea999df0a26083eb3/respx/patterns.py#L502-L507
E.g. `respx_mock.get("https://example.com", params__eq={}).mock(...)` matches requests with any query params, including empty ones.
Looks like the value check must be more accurate.
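With the fix, an equal lookup keeps empty values instead of skipping them, so the route only matches requests without query params (sketched from the added tests):
```python
import httpx
import respx

with respx.mock:
    respx.get("https://example.com/", params__eq={}).respond(204)
    assert httpx.get("https://example.com/").status_code == 204
    # httpx.get("https://example.com/?x=y") now raises AllMockedAssertionError
```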
respx version 0.19.2
|
0.0
|
019a8a839193af265b15c92a2b7f8dec67c6cb65
|
[
"tests/test_patterns.py::test_m_pattern[kwargs1-https://foo.bar/?x=y-False]"
] |
[
"tests/test_patterns.py::test_url_pattern_invalid",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar:8080/baz/-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-foo.bar-True]",
"tests/test_patterns.py::test_match_context",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params17-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_host_pattern[Lookup.REGEX-.+\\\\.bar-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data0-True]",
"tests/test_patterns.py::test_json_pattern_path[json5-foo__ham-spam-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value0-json0-True]",
"tests/test_patterns.py::test_merge_patterns",
"tests/test_patterns.py::test_m_pattern[kwargs2-https://foo.bar/?x=y-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-None-//foo.bar/-True]",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True1]",
"tests/test_patterns.py::test_params_pattern_hash",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies1-request_cookies1-False]",
"tests/test_patterns.py::test_json_pattern_path[json4-pk-123-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-https://foo.bar/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-443-https://foo.bar/-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies2-request_cookies2-True]",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-https://a.b/b-context7-https://a.b/baz/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params7-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-get-True]",
"tests/test_patterns.py::test_parse_url_patterns",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_url_pattern_hash",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https?://a.b/(?P<c>\\\\w+)/-context0-http://a.b/c/-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies5-None-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/c/-context3-https://a.b/c/-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.IN-scheme3-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-^https://a.b/.+$-context1-https://a.b/c/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/?x=y-context6-https://a.b?x=y-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_json_pattern_path[json2-ham__1__egg-yolk-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port8-https://foo.bar/-True]",
"tests/test_patterns.py::test_json_pattern_path[json0-foo__bar-baz-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS--https://foo.bar/-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-HTTPS-True]",
"tests/test_patterns.py::test_iter_pattern",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies3-request_cookies3-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value2-json2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params10-https://foo.bar/?x=2&x=3-False]",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/-True]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers1--False]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-https-True]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers0-request_headers0-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://foo.bar/-True]",
"tests/test_patterns.py::test_m_pattern[kwargs0-https://foo.bar/-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value3-json3-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-json-string-json-string-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params5-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value1-json1-False]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies4-request_cookies4-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies0-request_cookies0-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value5-json5-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params6-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_cookies_pattern__hash",
"tests/test_patterns.py::test_invalid_pattern",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params18-https://foo.bar/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/x/-context4-https://a.b/c/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params8-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params9-https://foo.bar/?x=1&x=2-True]",
"tests/test_patterns.py::test_unique_pattern_key",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL--https://foo.bar/-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-GET-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value4-False]",
"tests/test_patterns.py::test_path_pattern",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True0]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params15-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_json_pattern_path[json6-1__name-lundberg-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params4-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_bitwise_and",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_bitwise_operators[PATCH-https://foo.bar/-True]",
"tests/test_patterns.py::test_headers_pattern_hash",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-http-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params16-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-http://a.b/baz/-context8-https://a.b/baz/-False]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-ham.spam-False]",
"tests/test_patterns.py::test_bitwise_operators[PUT-https://foo.bar/-False]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-POST-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b?x=y-context5-https://a.b/?x=y-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-22-//foo.bar:22/baz/-True]",
"tests/test_patterns.py::test_json_pattern_path[json3-0__name-jonas-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2&x=1-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port9-https://foo.bar:8080/-False]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies6-request_cookies6-False]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value3-True]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://ham.spam/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port7-http://foo.bar/-True]",
"tests/test_patterns.py::test_json_pattern_path[json1-x-value1-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https://a.b/c/-context2-https://x.y/c/-False]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-24 10:48:35+00:00
|
bsd-3-clause
| 3,648 |
|
lundberg__respx-240
|
diff --git a/respx/patterns.py b/respx/patterns.py
index ed4c51d..3bf8d9b 100644
--- a/respx/patterns.py
+++ b/respx/patterns.py
@@ -413,7 +413,16 @@ class Path(Pattern):
self, value: Union[str, RegexPattern[str]]
) -> Union[str, RegexPattern[str]]:
if self.lookup in (Lookup.EQUAL, Lookup.STARTS_WITH) and isinstance(value, str):
- path = urljoin("/", value) # Ensure leading slash
+ # Percent encode path, i.e. revert parsed path by httpx.URL.
+ # Borrowed from HTTPX's "private" quote and percent_encode utilities.
+ path = "".join(
+ char
+ if char
+ in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~/"
+ else "".join(f"%{byte:02x}" for byte in char.encode("utf-8")).upper()
+ for char in value
+ )
+ path = urljoin("/", path) # Ensure leading slash
value = httpx.URL(path).path
elif self.lookup is Lookup.REGEX and isinstance(value, str):
value = re.compile(value)
|
lundberg/respx
|
d304a8585d3be54124d22155a6f252ef979072f9
|
diff --git a/tests/test_api.py b/tests/test_api.py
index 151da8d..c126408 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -82,6 +82,7 @@ async def test_http_methods(client):
("https://foo.bar/baz/", re.compile(r"^https://foo.bar/\w+/$")),
("https://foo.bar/baz/", (b"https", b"foo.bar", None, b"/baz/")),
("https://foo.bar:443/baz/", (b"https", b"foo.bar", 443, b"/baz/")),
+ ("https://foo.bar/%08", "https://foo.bar/%08"),
],
)
async def test_url_match(client, url, pattern):
|
URL pattern does not match, or fails, on URL-encoded special ASCII symbols in the URL
Hey!
I ran into a bug while using respx with hypothesis to mock auto-generated URLs that contain URL-encoded special symbols in the path.
This code fails with the `AllMockedAssertionError`:
```python
import httpx
import respx
with respx.mock:
url = 'http://test.com/%0a'
respx.get(url=url).respond()
httpx.get(url)
```
This code fails with `httpx.InvalidURL`:
```python
with respx.mock:
url = 'http://test.com/%08'
respx.get(url=url).respond()
assert httpx.get(url).status_code == 200
```
Full traceback:
```
Traceback (most recent call last):
File "...", line 8, in <module>
respx.get(url=url).respond()
File ".../python3.9/site-packages/respx/api.py", line 74, in get
return mock.get(url, name=name, **lookups)
File ".../python3.9/site-packages/respx/router.py", line 174, in get
return self.request(method="GET", url=url, name=name, **lookups)
File ".../python3.9/site-packages/respx/router.py", line 165, in request
return self.route(method=method, url=url, name=name, **lookups)
File ".../python3.9/site-packages/respx/router.py", line 132, in route
route = Route(*patterns, **lookups)
File ".../python3.9/site-packages/respx/models.py", line 119, in __init__
self._pattern = M(*patterns, **lookups)
File ".../python3.9/site-packages/respx/patterns.py", line 544, in M
extras = parse_url_patterns(value)
File ".../python3.9/site-packages/respx/patterns.py", line 648, in parse_url_patterns
bases[Path.key] = Path(url.path, lookup=lookup)
File ".../python3.9/site-packages/respx/patterns.py", line 92, in __init__
self.value = self.clean(value)
File ".../python3.9/site-packages/respx/patterns.py", line 417, in clean
value = httpx.URL(path).path
File ".../python3.9/site-packages/httpx/_urls.py", line 113, in __init__
self._uri_reference = urlparse(url, **kwargs)
File ".../python3.9/site-packages/httpx/_urlparse.py", line 160, in urlparse
raise InvalidURL("Invalid non-printable ASCII character in URL")
httpx.InvalidURL: Invalid non-printable ASCII character in URL
```
I could reproduce this problem only with the `url` pattern; the following works fine:
```python
from urllib.parse import urlparse
import httpx
import respx
with respx.mock:
url = 'http://test.com/%08'
parsed = urlparse(url)
respx.get(scheme=parsed.scheme, host=parsed.hostname, path=parsed.path, params=parsed.params).respond()
assert httpx.get(url).status_code == 200
```
Versions:
* python 3.9.7
* respx==0.20.1
* httpx==0.24.0
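The fix percent-encodes the path before handing it to `httpx.URL`; the encoding step as a standalone sketch mirroring the patch:
```python
# Unreserved characters (plus "/") are kept; everything else is
# percent-encoded, mirroring httpx's private quote/percent_encode helpers.
SAFE = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~/"

def percent_encode_path(value: str) -> str:
    return "".join(
        char
        if char in SAFE
        else "".join(f"%{byte:02x}" for byte in char.encode("utf-8")).upper()
        for char in value
    )

assert percent_encode_path("/\x08") == "/%08"
```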
|
0.0
|
d304a8585d3be54124d22155a6f252ef979072f9
|
[
"tests/test_api.py::test_url_match[https://foo.bar/%08-https://foo.bar/%08]"
] |
[
"tests/test_api.py::test_http_methods",
"tests/test_api.py::test_url_match[https://foo.bar-https://foo.bar]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-None]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-https://foo.bar/baz/]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-^https://foo.bar/\\\\w+/$]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-pattern5]",
"tests/test_api.py::test_url_match[https://foo.bar:443/baz/-pattern6]",
"tests/test_api.py::test_invalid_url_pattern",
"tests/test_api.py::test_repeated_pattern",
"tests/test_api.py::test_status_code",
"tests/test_api.py::test_headers[headers0-None-expected0]",
"tests/test_api.py::test_headers[headers1-None-expected1]",
"tests/test_api.py::test_headers[headers2-ham/spam-expected2]",
"tests/test_api.py::test_text_encoding[eldr\\xc3\\xa4v-eldr\\xe4v]",
"tests/test_api.py::test_text_encoding[\\xe4pple-\\xe4pple]",
"tests/test_api.py::test_text_encoding[Gehäusegr\\xf6\\xdfe-Gehäusegr\\xf6\\xdfe]",
"tests/test_api.py::test_content_variants[content-foobar-None0]",
"tests/test_api.py::test_content_variants[content-foobar-None1]",
"tests/test_api.py::test_content_variants[json-value2-application/json]",
"tests/test_api.py::test_content_variants[json-value3-application/json]",
"tests/test_api.py::test_content_variants[text-foobar-text/plain;",
"tests/test_api.py::test_content_variants[html-<strong>foobar</strong>-text/html;",
"tests/test_api.py::test_json_content[content0-headers0-expected_headers0]",
"tests/test_api.py::test_json_content[content1-headers1-expected_headers1]",
"tests/test_api.py::test_json_post_body",
"tests/test_api.py::test_raising_content",
"tests/test_api.py::test_callable_content",
"tests/test_api.py::test_request_callback",
"tests/test_api.py::test_pass_through[httpcore-route0-True]",
"tests/test_api.py::test_pass_through[httpx-route1-True]",
"tests/test_api.py::test_pass_through[httpcore-route2-False]",
"tests/test_api.py::test_pass_through[httpcore-route3-True]",
"tests/test_api.py::test_parallel_requests",
"tests/test_api.py::test_method_case[DELETE-delete]",
"tests/test_api.py::test_method_case[delete-delete]",
"tests/test_api.py::test_method_case[GET-get]",
"tests/test_api.py::test_method_case[get-get]",
"tests/test_api.py::test_method_case[HEAD-head]",
"tests/test_api.py::test_method_case[head-head]",
"tests/test_api.py::test_method_case[OPTIONS-options]",
"tests/test_api.py::test_method_case[options-options]",
"tests/test_api.py::test_method_case[PATCH-patch]",
"tests/test_api.py::test_method_case[patch-patch]",
"tests/test_api.py::test_method_case[POST-post]",
"tests/test_api.py::test_method_case[post-post]",
"tests/test_api.py::test_method_case[PUT-put]",
"tests/test_api.py::test_method_case[put-put]",
"tests/test_api.py::test_pop",
"tests/test_api.py::test_params_match[https://foo/-foo=bar-https://foo/-foo=bar0]",
"tests/test_api.py::test_params_match[https://foo/-foo=bar-https://foo/-foo=bar1]",
"tests/test_api.py::test_params_match[https://foo/-params2-https://foo/-call_params2]",
"tests/test_api.py::test_params_match[https://foo/-params3-https://foo/-call_params3]",
"tests/test_api.py::test_params_match[https://foo/-params4-https://foo/-call_params4]",
"tests/test_api.py::test_params_match[https://foo?foo=bar-baz=qux-https://foo?foo=bar-baz=qux]",
"tests/test_api.py::test_params_match[https://foo?foo=bar-baz=qux-https://foo?foo=bar&baz=qux-None]",
"tests/test_api.py::test_params_match[https://foo/(\\\\w+)/-foo=bar-https://foo/bar/-foo=bar]",
"tests/test_api.py::test_params_match[url8-foo=bar-https://foo/-foo=bar]",
"tests/test_api.py::test_params_match[url9-baz=qux-https://foo?foo=bar&baz=qux-None]",
"tests/test_api.py::test_build_url_base[None-https://foo.bar/baz/]",
"tests/test_api.py::test_build_url_base[-https://foo.bar/baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar-baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-/baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/baz/-None]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-/(\\\\w+)/]",
"tests/test_api.py::test_add",
"tests/test_api.py::test_respond",
"tests/test_api.py::test_async_post_content[kwargs0]",
"tests/test_api.py::test_async_post_content[kwargs1]",
"tests/test_api.py::test_async_post_content[kwargs2]",
"tests/test_api.py::test_async_post_content[kwargs3]",
"tests/test_api.py::test_async_post_content[kwargs4]"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-20 15:01:15+00:00
|
bsd-3-clause
| 3,649 |
|
lundberg__respx-252
|
diff --git a/docs/api.md b/docs/api.md
index 7432767..ae54c7c 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -309,9 +309,9 @@ respx.post("https://example.org/", content__contains="bar")
```
### Data
-Matches request *form data*, using [eq](#eq) as default lookup.
+Matches request *form data*, excluding files, using [eq](#eq) as default lookup.
> Key: `data`
-> Lookups: [eq](#eq)
+> Lookups: [eq](#eq), [contains](#contains)
``` python
respx.post("https://example.org/", data={"foo": "bar"})
```
diff --git a/respx/patterns.py b/respx/patterns.py
index 3bf8d9b..75cb815 100644
--- a/respx/patterns.py
+++ b/respx/patterns.py
@@ -25,6 +25,8 @@ from urllib.parse import urljoin
import httpx
+from respx.utils import MultiItems, decode_data
+
from .types import (
URL as RawURL,
CookieTypes,
@@ -536,14 +538,16 @@ class JSON(ContentMixin, PathPattern):
return jsonlib.dumps(value, sort_keys=True)
-class Data(ContentMixin, Pattern):
- lookups = (Lookup.EQUAL,)
+class Data(MultiItemsMixin, Pattern):
+ lookups = (Lookup.EQUAL, Lookup.CONTAINS)
key = "data"
- value: bytes
+ value: MultiItems
+
+ def clean(self, value: Dict) -> MultiItems:
+ return MultiItems(value)
- def clean(self, value: Dict) -> bytes:
- request = httpx.Request("POST", "/", data=value)
- data = request.read()
+ def parse(self, request: httpx.Request) -> Any:
+ data, _ = decode_data(request)
return data
diff --git a/respx/utils.py b/respx/utils.py
new file mode 100644
index 0000000..434c30d
--- /dev/null
+++ b/respx/utils.py
@@ -0,0 +1,73 @@
+import email
+from email.message import Message
+from typing import List, Tuple, cast
+from urllib.parse import parse_qsl
+
+import httpx
+
+
+class MultiItems(dict):
+ def get_list(self, key: str) -> List[str]:
+ try:
+ return [self[key]]
+ except KeyError: # pragma: no cover
+ return []
+
+ def multi_items(self) -> List[Tuple[str, str]]:
+ return list(self.items())
+
+
+def _parse_multipart_form_data(
+ content: bytes, *, content_type: str, encoding: str
+) -> Tuple[MultiItems, MultiItems]:
+ form_data = b"\r\n".join(
+ (
+ b"MIME-Version: 1.0",
+ b"Content-Type: " + content_type.encode(encoding),
+ b"\r\n" + content,
+ )
+ )
+ data = MultiItems()
+ files = MultiItems()
+ for payload in email.message_from_bytes(form_data).get_payload():
+ payload = cast(Message, payload)
+ name = payload.get_param("name", header="Content-Disposition")
+ filename = payload.get_filename()
+ content_type = payload.get_content_type()
+ value = payload.get_payload(decode=True)
+ assert isinstance(value, bytes)
+ if content_type.startswith("text/") and filename is None:
+ # Text field
+ data[name] = value.decode(payload.get_content_charset() or "utf-8")
+ else:
+ # File field
+ files[name] = filename, value
+
+ return data, files
+
+
+def _parse_urlencoded_data(content: bytes, *, encoding: str) -> MultiItems:
+ return MultiItems(
+ (key, value)
+ for key, value in parse_qsl(content.decode(encoding), keep_blank_values=True)
+ )
+
+
+def decode_data(request: httpx.Request) -> Tuple[MultiItems, MultiItems]:
+ content = request.read()
+ content_type = request.headers.get("Content-Type", "")
+
+ if content_type.startswith("multipart/form-data"):
+ data, files = _parse_multipart_form_data(
+ content,
+ content_type=content_type,
+ encoding=request.headers.encoding,
+ )
+ else:
+ data = _parse_urlencoded_data(
+ content,
+ encoding=request.headers.encoding,
+ )
+ files = MultiItems()
+
+ return data, files
|
lundberg/respx
|
58ad17e9cee2ea81183a5855acbf4bf45dc9faa0
|
diff --git a/tests/test_api.py b/tests/test_api.py
index c126408..597c589 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -263,6 +263,15 @@ def test_json_post_body():
assert get_route.called
+def test_data_post_body():
+ with respx.mock:
+ url = "https://foo.bar/"
+ route = respx.post(url, data={"foo": "bar"}) % 201
+ response = httpx.post(url, data={"foo": "bar"}, files={"file": b"..."})
+ assert response.status_code == 201
+ assert route.called
+
+
async def test_raising_content(client):
async with MockRouter() as respx_mock:
url = "https://foo.bar/"
diff --git a/tests/test_patterns.py b/tests/test_patterns.py
index e704b40..f492307 100644
--- a/tests/test_patterns.py
+++ b/tests/test_patterns.py
@@ -323,14 +323,69 @@ def test_content_pattern(lookup, content, expected):
@pytest.mark.parametrize(
- ("lookup", "data", "expected"),
+ ("lookup", "data", "request_data", "expected"),
[
- (Lookup.EQUAL, {"foo": "bar", "ham": "spam"}, True),
+ (
+ Lookup.EQUAL,
+ {"foo": "bar", "ham": "spam"},
+ None,
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"foo": "bar", "ham": "spam"},
+ {"ham": "spam", "foo": "bar"},
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"uni": "äpple", "mixed": "Gehäusegröße"},
+ None,
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"blank_value": ""},
+ None,
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"x": "a"},
+ {"x": "b"},
+ False,
+ ),
+ (
+ Lookup.EQUAL,
+ {"foo": "bar"},
+ {"foo": "bar", "ham": "spam"},
+ False,
+ ),
+ (
+ Lookup.CONTAINS,
+ {"foo": "bar"},
+ {"foo": "bar", "ham": "spam"},
+ True,
+ ),
],
)
-def test_data_pattern(lookup, data, expected):
- request = httpx.Request("POST", "https://foo.bar/", data=data)
- match = Data(data, lookup=lookup).match(request)
+def test_data_pattern(lookup, data, request_data, expected):
+ request_with_data = httpx.Request(
+ "POST",
+ "https://foo.bar/",
+ data=request_data or data,
+ )
+ request_with_data_and_files = httpx.Request(
+ "POST",
+ "https://foo.bar/",
+ data=request_data or data,
+ files={"upload-file": ("report.xls", b"<...>", "application/vnd.ms-excel")},
+ )
+
+ match = Data(data, lookup=lookup).match(request_with_data)
+ assert bool(match) is expected
+
+ match = Data(data, lookup=lookup).match(request_with_data_and_files)
assert bool(match) is expected
|
Support multipart encoding in Data pattern
`Data` pattern only handles urlencoded data, not multipart encoded. This is troublesome because adding `files` to the request changes the encoding, so the `Data` pattern fails to match the request.
### MWE
```python
import httpx
import respx
with respx.mock() as rsps:
rsps.post('http://example.org/', data={'answer': '42'})
httpx.post('http://example.org', data={'answer': '42'}) # OK
with respx.mock() as rsps:
rsps.post('http://example.org/', data={'answer': '42'})
httpx.post('http://example.org', data={'answer': '42'}, files={'file': b'content'})
# >>> respx.models.AllMockedAssertionError: RESPX: <Request('POST', 'http://example.org/')> not mocked!
```
Related to #115
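A minimal sketch (not respx's public API; request construction uses only public httpx arguments) of decoding multipart form data with the stdlib `email` module, mirroring the approach the patch above takes in `_parse_multipart_form_data`:
```python
import email
from email.message import Message
from typing import cast

import httpx

request = httpx.Request(
    "POST",
    "http://example.org/",
    data={"answer": "42"},
    files={"file": b"content"},
)
content = request.read()
content_type = request.headers["Content-Type"]  # multipart/form-data; boundary=...

# Wrap the raw body in a MIME envelope so email.message_from_bytes can parse it.
envelope = b"\r\n".join(
    (
        b"MIME-Version: 1.0",
        b"Content-Type: " + content_type.encode("ascii"),
        b"\r\n" + content,
    )
)
for part in email.message_from_bytes(envelope).get_payload():
    part = cast(Message, part)
    name = part.get_param("name", header="Content-Disposition")
    # Prints each form field's name, filename (None for plain fields), and raw payload.
    print(name, part.get_filename(), part.get_payload(decode=True))
```
Splitting the decoded parts into data fields versus file fields is then a matter of checking whether a part carries a filename, which is what lets the `Data` pattern ignore files entirely.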
|
0.0
|
58ad17e9cee2ea81183a5855acbf4bf45dc9faa0
|
[
"tests/test_api.py::test_data_post_body",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data0-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data1-request_data1-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data2-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data3-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.CONTAINS-data6-request_data6-True]"
] |
[
"tests/test_api.py::test_http_methods",
"tests/test_api.py::test_url_match[https://foo.bar-https://foo.bar]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-None]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-https://foo.bar/baz/]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-^https://foo.bar/\\\\w+/$]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-pattern5]",
"tests/test_api.py::test_url_match[https://foo.bar:443/baz/-pattern6]",
"tests/test_api.py::test_url_match[https://foo.bar/%08-https://foo.bar/%08]",
"tests/test_api.py::test_invalid_url_pattern",
"tests/test_api.py::test_repeated_pattern",
"tests/test_api.py::test_status_code",
"tests/test_api.py::test_headers[headers0-None-expected0]",
"tests/test_api.py::test_headers[headers1-None-expected1]",
"tests/test_api.py::test_headers[headers2-ham/spam-expected2]",
"tests/test_api.py::test_text_encoding[eldr\\xc3\\xa4v-eldr\\xe4v]",
"tests/test_api.py::test_text_encoding[\\xe4pple-\\xe4pple]",
"tests/test_api.py::test_text_encoding[Gehäusegr\\xf6\\xdfe-Gehäusegr\\xf6\\xdfe]",
"tests/test_api.py::test_content_variants[content-foobar-None0]",
"tests/test_api.py::test_content_variants[content-foobar-None1]",
"tests/test_api.py::test_content_variants[json-value2-application/json]",
"tests/test_api.py::test_content_variants[json-value3-application/json]",
"tests/test_api.py::test_content_variants[text-foobar-text/plain;",
"tests/test_api.py::test_content_variants[html-<strong>foobar</strong>-text/html;",
"tests/test_api.py::test_json_content[content0-headers0-expected_headers0]",
"tests/test_api.py::test_json_content[content1-headers1-expected_headers1]",
"tests/test_api.py::test_json_post_body",
"tests/test_api.py::test_raising_content",
"tests/test_api.py::test_callable_content",
"tests/test_api.py::test_request_callback",
"tests/test_api.py::test_pass_through[httpcore-route0-True]",
"tests/test_api.py::test_pass_through[httpx-route1-True]",
"tests/test_api.py::test_pass_through[httpcore-route2-False]",
"tests/test_api.py::test_pass_through[httpcore-route3-True]",
"tests/test_api.py::test_parallel_requests",
"tests/test_api.py::test_method_case[DELETE-delete]",
"tests/test_api.py::test_method_case[delete-delete]",
"tests/test_api.py::test_method_case[GET-get]",
"tests/test_api.py::test_method_case[get-get]",
"tests/test_api.py::test_method_case[HEAD-head]",
"tests/test_api.py::test_method_case[head-head]",
"tests/test_api.py::test_method_case[OPTIONS-options]",
"tests/test_api.py::test_method_case[options-options]",
"tests/test_api.py::test_method_case[PATCH-patch]",
"tests/test_api.py::test_method_case[patch-patch]",
"tests/test_api.py::test_method_case[POST-post]",
"tests/test_api.py::test_method_case[post-post]",
"tests/test_api.py::test_method_case[PUT-put]",
"tests/test_api.py::test_method_case[put-put]",
"tests/test_api.py::test_pop",
"tests/test_api.py::test_params_match[https://foo/-foo=bar-https://foo/-foo=bar0]",
"tests/test_api.py::test_params_match[https://foo/-foo=bar-https://foo/-foo=bar1]",
"tests/test_api.py::test_params_match[https://foo/-params2-https://foo/-call_params2]",
"tests/test_api.py::test_params_match[https://foo/-params3-https://foo/-call_params3]",
"tests/test_api.py::test_params_match[https://foo/-params4-https://foo/-call_params4]",
"tests/test_api.py::test_params_match[https://foo?foo=bar-baz=qux-https://foo?foo=bar-baz=qux]",
"tests/test_api.py::test_params_match[https://foo?foo=bar-baz=qux-https://foo?foo=bar&baz=qux-None]",
"tests/test_api.py::test_params_match[https://foo/(\\\\w+)/-foo=bar-https://foo/bar/-foo=bar]",
"tests/test_api.py::test_params_match[url8-foo=bar-https://foo/-foo=bar]",
"tests/test_api.py::test_params_match[url9-baz=qux-https://foo?foo=bar&baz=qux-None]",
"tests/test_api.py::test_build_url_base[None-https://foo.bar/baz/]",
"tests/test_api.py::test_build_url_base[-https://foo.bar/baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar-baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-/baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/baz/-None]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-/(\\\\w+)/]",
"tests/test_api.py::test_add",
"tests/test_api.py::test_respond",
"tests/test_api.py::test_async_post_content[kwargs0]",
"tests/test_api.py::test_async_post_content[kwargs1]",
"tests/test_api.py::test_async_post_content[kwargs2]",
"tests/test_api.py::test_async_post_content[kwargs3]",
"tests/test_api.py::test_async_post_content[kwargs4]",
"tests/test_patterns.py::test_bitwise_and",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://ham.spam/-True]",
"tests/test_patterns.py::test_bitwise_operators[PATCH-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[PUT-https://foo.bar/-False]",
"tests/test_patterns.py::test_match_context",
"tests/test_patterns.py::test_noop_pattern",
"tests/test_patterns.py::test_m_pattern[kwargs0-https://foo.bar/-True]",
"tests/test_patterns.py::test_m_pattern[kwargs1-https://foo.bar/?x=y-False]",
"tests/test_patterns.py::test_m_pattern[kwargs2-https://foo.bar/?x=y-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-GET-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-get-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-POST-False]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value3-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value4-False]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers0-request_headers0-True]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers1--False]",
"tests/test_patterns.py::test_headers_pattern_hash",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies0-request_cookies0-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies1-request_cookies1-False]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies2-request_cookies2-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies3-request_cookies3-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies4-request_cookies4-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies5-None-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies6-request_cookies6-False]",
"tests/test_patterns.py::test_cookies_pattern__hash",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-https-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-HTTPS-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-http-False]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.IN-scheme3-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-foo.bar-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-ham.spam-False]",
"tests/test_patterns.py::test_host_pattern[Lookup.REGEX-.+\\\\.bar-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-443-https://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-https://foo.bar/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar:8080/baz/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-22-//foo.bar:22/baz/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-None-//foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port7-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port8-https://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port9-https://foo.bar:8080/-False]",
"tests/test_patterns.py::test_path_pattern",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS--https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=-https://foo.bar/?x=-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params5-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params6-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params7-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params8-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params9-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params10-https://foo.bar/?x=1&x=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params11-https://foo.bar/?x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL--https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x-https://foo.bar/?x-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=-https://foo.bar/?x=-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params18-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params19-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params20-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params21-https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2&x=1-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern_hash",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https?://a.b/(?P<c>\\\\w+)/-context0-http://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-^https://a.b/.+$-context1-https://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https://a.b/c/-context2-https://x.y/c/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/c/-context3-https://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/x/-context4-https://a.b/c/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b?x=y-context5-https://a.b/?x=y-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/?x=y-context6-https://a.b?x=y-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-https://a.b/b-context7-https://a.b/baz/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-http://a.b/baz/-context8-https://a.b/baz/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-value9-context9-https://[FE80::1]-True]",
"tests/test_patterns.py::test_url_pattern_invalid",
"tests/test_patterns.py::test_url_pattern_hash",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True0]",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True1]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-bar-True0]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-bar-True1]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-baz-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data4-request_data4-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data5-request_data5-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value0-json0-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value1-json1-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value2-json2-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value3-json3-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-json-string-json-string-True]",
"tests/test_patterns.py::test_json_pattern_path[json0-foo__bar-baz-True]",
"tests/test_patterns.py::test_json_pattern_path[json1-x-value1-True]",
"tests/test_patterns.py::test_json_pattern_path[json2-ham__1__egg-yolk-True]",
"tests/test_patterns.py::test_json_pattern_path[json3-0__name-jonas-True]",
"tests/test_patterns.py::test_json_pattern_path[json4-pk-123-True]",
"tests/test_patterns.py::test_json_pattern_path[json5-foo__ham-spam-False]",
"tests/test_patterns.py::test_json_pattern_path[json6-1__name-lundberg-False]",
"tests/test_patterns.py::test_invalid_pattern",
"tests/test_patterns.py::test_iter_pattern",
"tests/test_patterns.py::test_parse_url_patterns",
"tests/test_patterns.py::test_merge_patterns",
"tests/test_patterns.py::test_unique_pattern_key"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-15 14:00:18+00:00
|
bsd-3-clause
| 3,650 |
|
lundberg__respx-253
|
diff --git a/docs/api.md b/docs/api.md
index ae54c7c..f43c7a8 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -316,6 +316,17 @@ Matches request *form data*, excluding files, using [eq](#eq) as default lookup.
respx.post("https://example.org/", data={"foo": "bar"})
```
+### Files
+Matches files within request *form data*, using [contains](#contains) as default lookup.
+> Key: `files`
+> Lookups: [contains](#contains), [eq](#eq)
+``` python
+respx.post("https://example.org/", files={"some_file": b"..."})
+respx.post("https://example.org/", files={"some_file": ANY})
+respx.post("https://example.org/", files={"some_file": ("filename.txt", b"...")})
+respx.post("https://example.org/", files={"some_file": ("filename.txt", ANY)})
+```
+
### JSON
Matches request *json* content, using [eq](#eq) as default lookup.
> Key: `json`
diff --git a/respx/patterns.py b/respx/patterns.py
index 75cb815..d8b00b5 100644
--- a/respx/patterns.py
+++ b/respx/patterns.py
@@ -1,5 +1,6 @@
import json as jsonlib
import operator
+import pathlib
import re
from abc import ABC
from enum import Enum
@@ -12,6 +13,7 @@ from typing import (
ClassVar,
Dict,
List,
+ Mapping,
Optional,
Pattern as RegexPattern,
Sequence,
@@ -30,8 +32,10 @@ from respx.utils import MultiItems, decode_data
from .types import (
URL as RawURL,
CookieTypes,
+ FileTypes,
HeaderTypes,
QueryParamTypes,
+ RequestFiles,
URLPatternTypes,
)
@@ -551,6 +555,38 @@ class Data(MultiItemsMixin, Pattern):
return data
+class Files(MultiItemsMixin, Pattern):
+ lookups = (Lookup.CONTAINS, Lookup.EQUAL)
+ key = "files"
+ value: MultiItems
+
+ def _normalize_file_value(self, value: FileTypes) -> Tuple[Any, ...]:
+ # Mimic httpx `FileField` to normalize `files` kwarg to shortest tuple style
+ if isinstance(value, tuple):
+ filename, fileobj = value[:2]
+ else:
+ try:
+ filename = pathlib.Path(str(getattr(value, "name"))).name # noqa: B009
+ except AttributeError:
+ filename = ANY
+ fileobj = value
+
+ return filename, fileobj
+
+ def clean(self, value: RequestFiles) -> MultiItems:
+ if isinstance(value, Mapping):
+ value = list(value.items())
+
+ files = MultiItems(
+ (name, self._normalize_file_value(file_value)) for name, file_value in value
+ )
+ return files
+
+ def parse(self, request: httpx.Request) -> Any:
+ _, files = decode_data(request)
+ return files
+
+
def M(*patterns: Pattern, **lookups: Any) -> Pattern:
extras = None
diff --git a/respx/types.py b/respx/types.py
index 0ce2101..1cf3e0c 100644
--- a/respx/types.py
+++ b/respx/types.py
@@ -1,4 +1,5 @@
from typing import (
+ IO,
Any,
AsyncIterable,
Awaitable,
@@ -7,6 +8,7 @@ from typing import (
Iterable,
Iterator,
List,
+ Mapping,
Optional,
Pattern,
Sequence,
@@ -53,3 +55,17 @@ SideEffectTypes = Union[
Type[Exception],
Iterator[SideEffectListTypes],
]
+
+# Borrowed from HTTPX's "private" types.
+FileContent = Union[IO[bytes], bytes, str]
+FileTypes = Union[
+ # file (or bytes)
+ FileContent,
+ # (filename, file (or bytes))
+ Tuple[Optional[str], FileContent],
+ # (filename, file (or bytes), content_type)
+ Tuple[Optional[str], FileContent, Optional[str]],
+ # (filename, file (or bytes), content_type, headers)
+ Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]],
+]
+RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
|
lundberg/respx
|
07ae887a4fdd1841ffad81daa05437dfbf56be8c
|
diff --git a/tests/test_api.py b/tests/test_api.py
index 597c589..ef1dddd 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -272,6 +272,16 @@ def test_data_post_body():
assert route.called
+def test_files_post_body():
+ with respx.mock:
+ url = "https://foo.bar/"
+ file = ("file", ("filename.txt", b"...", "text/plain", {"X-Foo": "bar"}))
+ route = respx.post(url, files={"file": mock.ANY}) % 201
+ response = httpx.post(url, files=[file])
+ assert response.status_code == 201
+ assert route.called
+
+
async def test_raising_content(client):
async with MockRouter() as respx_mock:
url = "https://foo.bar/"
diff --git a/tests/test_patterns.py b/tests/test_patterns.py
index f492307..dda7914 100644
--- a/tests/test_patterns.py
+++ b/tests/test_patterns.py
@@ -10,6 +10,7 @@ from respx.patterns import (
Content,
Cookies,
Data,
+ Files,
Headers,
Host,
Lookup,
@@ -389,6 +390,112 @@ def test_data_pattern(lookup, data, request_data, expected):
assert bool(match) is expected
[email protected](
+ ("lookup", "files", "request_files", "expected"),
+ [
+ (
+ Lookup.EQUAL,
+ [("file_1", b"foo..."), ("file_2", b"bar...")],
+ None,
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"file_1": b"foo...", "file_2": b"bar..."},
+ None,
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"file_1": ANY},
+ {"file_1": b"foobar..."},
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {
+ "file_1": ("filename_1.txt", b"foo..."),
+ "file_2": ("filename_2.txt", b"bar..."),
+ },
+ None,
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"file_1": ("filename_1.txt", ANY)},
+ {"file_1": ("filename_1.txt", b"...")},
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"upload": b"foo..."},
+ {"upload": b"bar..."}, # Wrong file data
+ False,
+ ),
+ (
+ Lookup.EQUAL,
+ {
+ "file_1": ("filename_1.txt", b"foo..."),
+ "file_2": ("filename_2.txt", b"bar..."),
+ },
+ {
+ "file_1": ("filename_1.txt", b"foo..."),
+ "file_2": ("filename_2.txt", b"ham..."), # Wrong file data
+ },
+ False,
+ ),
+ (
+ Lookup.CONTAINS,
+ {
+ "file_1": ("filename_1.txt", b"foo..."),
+ },
+ {
+ "file_1": ("filename_1.txt", b"foo..."),
+ "file_2": ("filename_2.txt", b"bar..."),
+ },
+ True,
+ ),
+ (
+ Lookup.CONTAINS,
+ {
+ "file_1": ("filename_1.txt", ANY),
+ },
+ {
+ "file_1": ("filename_1.txt", b"foo..."),
+ "file_2": ("filename_2.txt", b"bar..."),
+ },
+ True,
+ ),
+ (
+ Lookup.CONTAINS,
+ [("file_1", ANY)],
+ {
+ "file_1": ("filename_1.txt", b"foo..."),
+ "file_2": ("filename_2.txt", b"bar..."),
+ },
+ True,
+ ),
+ (
+ Lookup.CONTAINS,
+ [("file_1", b"ham...")],
+ {
+ "file_1": ("filename_1.txt", b"foo..."),
+ "file_2": ("filename_2.txt", b"bar..."),
+ },
+ False,
+ ),
+ ],
+)
+def test_files_pattern(lookup, files, request_files, expected):
+ request = httpx.Request(
+ "POST",
+ "https://foo.bar/",
+ files=request_files or files,
+ )
+ match = Files(files, lookup=lookup).match(request)
+ assert bool(match) is expected
+
+
@pytest.mark.parametrize(
("lookup", "value", "json", "expected"),
[
|
Add a files pattern?
As mentioned in #87, it might be of interest to allow matching a request on files being sent.
- What do we want to match on?
- Is it even possible once the internal `HTTPX` request is built?
```py
route = respx.post("https://example.org/", files=?)
```
Since uploading files with `HTTPX` supports multiple arg types, it might *only* be possible to match on file names, if given. Reading a file object's content could affect `HTTPX` internals if not handled properly, e.g. resetting the seek point.
Needs further investigation; see [multiple files](https://github.com/encode/httpx/pull/1032/files).
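A minimal sketch (a hypothetical `normalize_file_value` helper, not respx's public API) of normalizing the different `files` value shapes httpx accepts into a comparable `(filename, fileobj)` tuple, as the `Files` pattern's clean step ends up doing in the patch above:
```python
import pathlib
from typing import Any, Tuple
from unittest.mock import ANY


def normalize_file_value(value: Any) -> Tuple[Any, Any]:
    if isinstance(value, tuple):
        # (filename, file) or a longer tuple style; keep the first two items.
        return tuple(value[:2])
    try:
        # File-like objects expose a name we can reduce to a bare filename.
        filename = pathlib.Path(str(value.name)).name
    except AttributeError:
        # Raw bytes/str carry no filename; match any filename.
        filename = ANY
    return filename, value


print(normalize_file_value(b"content"))
# (<ANY>, b'content')
print(normalize_file_value(("report.xls", b"<...>", "application/vnd.ms-excel")))
# ('report.xls', b'<...>')
```
Reducing every accepted shape to the shortest tuple style keeps the comparison on names and content only, so the pattern never has to consume a file object's stream.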
|
0.0
|
07ae887a4fdd1841ffad81daa05437dfbf56be8c
|
[
"tests/test_api.py::test_http_methods",
"tests/test_api.py::test_url_match[https://foo.bar-https://foo.bar]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-None]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-https://foo.bar/baz/]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-^https://foo.bar/\\\\w+/$]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-pattern5]",
"tests/test_api.py::test_url_match[https://foo.bar:443/baz/-pattern6]",
"tests/test_api.py::test_url_match[https://foo.bar/%08-https://foo.bar/%08]",
"tests/test_api.py::test_invalid_url_pattern",
"tests/test_api.py::test_repeated_pattern",
"tests/test_api.py::test_status_code",
"tests/test_api.py::test_headers[headers0-None-expected0]",
"tests/test_api.py::test_headers[headers1-None-expected1]",
"tests/test_api.py::test_headers[headers2-ham/spam-expected2]",
"tests/test_api.py::test_text_encoding[eldr\\xc3\\xa4v-eldr\\xe4v]",
"tests/test_api.py::test_text_encoding[\\xe4pple-\\xe4pple]",
"tests/test_api.py::test_text_encoding[Gehäusegr\\xf6\\xdfe-Gehäusegr\\xf6\\xdfe]",
"tests/test_api.py::test_content_variants[content-foobar-None0]",
"tests/test_api.py::test_content_variants[content-foobar-None1]",
"tests/test_api.py::test_content_variants[json-value2-application/json]",
"tests/test_api.py::test_content_variants[json-value3-application/json]",
"tests/test_api.py::test_content_variants[text-foobar-text/plain;",
"tests/test_api.py::test_content_variants[html-<strong>foobar</strong>-text/html;",
"tests/test_api.py::test_json_content[content0-headers0-expected_headers0]",
"tests/test_api.py::test_json_content[content1-headers1-expected_headers1]",
"tests/test_api.py::test_json_post_body",
"tests/test_api.py::test_data_post_body",
"tests/test_api.py::test_files_post_body",
"tests/test_api.py::test_raising_content",
"tests/test_api.py::test_callable_content",
"tests/test_api.py::test_request_callback",
"tests/test_api.py::test_pass_through[httpcore-route0-True]",
"tests/test_api.py::test_pass_through[httpx-route1-True]",
"tests/test_api.py::test_pass_through[httpcore-route2-False]",
"tests/test_api.py::test_pass_through[httpcore-route3-True]",
"tests/test_api.py::test_parallel_requests",
"tests/test_api.py::test_method_case[DELETE-delete]",
"tests/test_api.py::test_method_case[delete-delete]",
"tests/test_api.py::test_method_case[GET-get]",
"tests/test_api.py::test_method_case[get-get]",
"tests/test_api.py::test_method_case[HEAD-head]",
"tests/test_api.py::test_method_case[head-head]",
"tests/test_api.py::test_method_case[OPTIONS-options]",
"tests/test_api.py::test_method_case[options-options]",
"tests/test_api.py::test_method_case[PATCH-patch]",
"tests/test_api.py::test_method_case[patch-patch]",
"tests/test_api.py::test_method_case[POST-post]",
"tests/test_api.py::test_method_case[post-post]",
"tests/test_api.py::test_method_case[PUT-put]",
"tests/test_api.py::test_method_case[put-put]",
"tests/test_api.py::test_pop",
"tests/test_api.py::test_params_match[https://foo/-foo=bar-https://foo/-foo=bar0]",
"tests/test_api.py::test_params_match[https://foo/-foo=bar-https://foo/-foo=bar1]",
"tests/test_api.py::test_params_match[https://foo/-params2-https://foo/-call_params2]",
"tests/test_api.py::test_params_match[https://foo/-params3-https://foo/-call_params3]",
"tests/test_api.py::test_params_match[https://foo/-params4-https://foo/-call_params4]",
"tests/test_api.py::test_params_match[https://foo?foo=bar-baz=qux-https://foo?foo=bar-baz=qux]",
"tests/test_api.py::test_params_match[https://foo?foo=bar-baz=qux-https://foo?foo=bar&baz=qux-None]",
"tests/test_api.py::test_params_match[https://foo/(\\\\w+)/-foo=bar-https://foo/bar/-foo=bar]",
"tests/test_api.py::test_params_match[url8-foo=bar-https://foo/-foo=bar]",
"tests/test_api.py::test_params_match[url9-baz=qux-https://foo?foo=bar&baz=qux-None]",
"tests/test_api.py::test_build_url_base[None-https://foo.bar/baz/]",
"tests/test_api.py::test_build_url_base[-https://foo.bar/baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar-baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-/baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/baz/-None]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-/(\\\\w+)/]",
"tests/test_api.py::test_add",
"tests/test_api.py::test_respond",
"tests/test_api.py::test_async_post_content[kwargs0]",
"tests/test_api.py::test_async_post_content[kwargs1]",
"tests/test_api.py::test_async_post_content[kwargs2]",
"tests/test_api.py::test_async_post_content[kwargs3]",
"tests/test_api.py::test_async_post_content[kwargs4]",
"tests/test_patterns.py::test_bitwise_and",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://ham.spam/-True]",
"tests/test_patterns.py::test_bitwise_operators[PATCH-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[PUT-https://foo.bar/-False]",
"tests/test_patterns.py::test_match_context",
"tests/test_patterns.py::test_noop_pattern",
"tests/test_patterns.py::test_m_pattern[kwargs0-https://foo.bar/-True]",
"tests/test_patterns.py::test_m_pattern[kwargs1-https://foo.bar/?x=y-False]",
"tests/test_patterns.py::test_m_pattern[kwargs2-https://foo.bar/?x=y-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-GET-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-get-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-POST-False]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value3-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value4-False]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers0-request_headers0-True]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers1--False]",
"tests/test_patterns.py::test_headers_pattern_hash",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies0-request_cookies0-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies1-request_cookies1-False]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies2-request_cookies2-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies3-request_cookies3-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies4-request_cookies4-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies5-None-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies6-request_cookies6-False]",
"tests/test_patterns.py::test_cookies_pattern__hash",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-https-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-HTTPS-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-http-False]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.IN-scheme3-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-foo.bar-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-ham.spam-False]",
"tests/test_patterns.py::test_host_pattern[Lookup.REGEX-.+\\\\.bar-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-443-https://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-https://foo.bar/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar:8080/baz/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-22-//foo.bar:22/baz/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-None-//foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port7-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port8-https://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port9-https://foo.bar:8080/-False]",
"tests/test_patterns.py::test_path_pattern",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS--https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=-https://foo.bar/?x=-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params5-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params6-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params7-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params8-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params9-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params10-https://foo.bar/?x=1&x=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params11-https://foo.bar/?x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL--https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x-https://foo.bar/?x-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=-https://foo.bar/?x=-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params18-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params19-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params20-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params21-https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2&x=1-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern_hash",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https?://a.b/(?P<c>\\\\w+)/-context0-http://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-^https://a.b/.+$-context1-https://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https://a.b/c/-context2-https://x.y/c/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/c/-context3-https://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/x/-context4-https://a.b/c/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b?x=y-context5-https://a.b/?x=y-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/?x=y-context6-https://a.b?x=y-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-https://a.b/b-context7-https://a.b/baz/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-http://a.b/baz/-context8-https://a.b/baz/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-value9-context9-https://[FE80::1]-True]",
"tests/test_patterns.py::test_url_pattern_invalid",
"tests/test_patterns.py::test_url_pattern_hash",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True0]",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True1]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-bar-True0]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-bar-True1]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-baz-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data0-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data1-request_data1-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data2-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data3-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data4-request_data4-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data5-request_data5-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.CONTAINS-data6-request_data6-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files0-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files1-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files2-request_files2-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files3-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files4-request_files4-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files5-request_files5-False]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files6-request_files6-False]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files7-request_files7-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files8-request_files8-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files9-request_files9-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files10-request_files10-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value0-json0-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value1-json1-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value2-json2-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value3-json3-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-json-string-json-string-True]",
"tests/test_patterns.py::test_json_pattern_path[json0-foo__bar-baz-True]",
"tests/test_patterns.py::test_json_pattern_path[json1-x-value1-True]",
"tests/test_patterns.py::test_json_pattern_path[json2-ham__1__egg-yolk-True]",
"tests/test_patterns.py::test_json_pattern_path[json3-0__name-jonas-True]",
"tests/test_patterns.py::test_json_pattern_path[json4-pk-123-True]",
"tests/test_patterns.py::test_json_pattern_path[json5-foo__ham-spam-False]",
"tests/test_patterns.py::test_json_pattern_path[json6-1__name-lundberg-False]",
"tests/test_patterns.py::test_invalid_pattern",
"tests/test_patterns.py::test_iter_pattern",
"tests/test_patterns.py::test_parse_url_patterns",
"tests/test_patterns.py::test_merge_patterns",
"tests/test_patterns.py::test_unique_pattern_key"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-15 16:03:34+00:00
|
bsd-3-clause
| 3,651 |
|
lundberg__respx-254
|
diff --git a/docs/api.md b/docs/api.md
index f43c7a8..170d368 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -133,15 +133,17 @@ Setter for the [side effect](guide.md#mock-with-a-side-effect) to trigger.
Shortcut for creating and mocking a `HTTPX` [Response](#response).
-> <code>route.<strong>respond</strong>(*status_code=200, headers=None, content=None, text=None, html=None, json=None, stream=None*)</strong></code>
+> <code>route.<strong>respond</strong>(*status_code=200, headers=None, cookies=None, content=None, text=None, html=None, json=None, stream=None, content_type=None*)</strong></code>
>
> **Parameters:**
>
> * **status_code** - *(optional) int - default: `200`*
> Response status code to mock.
-> * **headers** - *(optional) dict*
+> * **headers** - *(optional) dict | Sequence[tuple[str, str]]*
> Response headers to mock.
-> * **content** - *(optional) bytes | str | iterable bytes*
+> * **cookies** - *(optional) dict | Sequence[tuple[str, str]] | Sequence[SetCookie]*
+> Response cookies to mock as `Set-Cookie` headers. See [SetCookie](#setcookie).
+> * **content** - *(optional) bytes | str | Iterable[bytes]*
> Response raw content to mock.
> * **text** - *(optional) str*
> Response *text* content to mock, with automatic content-type header added.
@@ -151,6 +153,8 @@ Shortcut for creating and mocking a `HTTPX` [Response](#response).
> Response *JSON* content to mock, with automatic content-type header added.
> * **stream** - *(optional) Iterable[bytes]*
> Response *stream* to mock.
+> * **content_type** - *(optional) str*
+> Response `Content-Type` header to mock.
>
> **Returns:** `Route`
@@ -191,6 +195,24 @@ Shortcut for creating and mocking a `HTTPX` [Response](#response).
> * **stream** - *(optional) Iterable[bytes]*
> Content *stream*.
+!!! tip "Cookies"
+ Use [respx.SetCookie(...)](#setcookie) to produce `Set-Cookie` headers.
+
+---
+
+## SetCookie
+
+A utility to render a `("Set-Cookie", <cookie header value>)` tuple. See route [respond](#respond) shortcut for alternative use.
+
+> <code>respx.<strong>SetCookie</strong>(*name, value, path=None, domain=None, expires=None, max_age=None, http_only=False, same_site=None, secure=False, partitioned=False*)</strong></code>
+
+``` python
+import respx
+respx.post("https://example.org/").mock(
+ return_value=httpx.Response(200, headers=[SetCookie("foo", "bar")])
+)
+```
+
---
## Patterns
diff --git a/respx/__init__.py b/respx/__init__.py
index 89083a4..13694fd 100644
--- a/respx/__init__.py
+++ b/respx/__init__.py
@@ -2,6 +2,7 @@ from .__version__ import __version__
from .handlers import ASGIHandler, WSGIHandler
from .models import MockResponse, Route
from .router import MockRouter, Router
+from .utils import SetCookie
from .api import ( # isort:skip
mock,
@@ -24,6 +25,7 @@ from .api import ( # isort:skip
options,
)
+
__all__ = [
"__version__",
"MockResponse",
@@ -32,6 +34,7 @@ __all__ = [
"WSGIHandler",
"Router",
"Route",
+ "SetCookie",
"mock",
"routes",
"calls",
diff --git a/respx/models.py b/respx/models.py
index 28fd609..b53974f 100644
--- a/respx/models.py
+++ b/respx/models.py
@@ -16,10 +16,13 @@ from warnings import warn
import httpx
+from respx.utils import SetCookie
+
from .patterns import M, Pattern
from .types import (
CallableSideEffect,
Content,
+ CookieTypes,
HeaderTypes,
ResolvedResponseTypes,
RouteResultTypes,
@@ -90,6 +93,7 @@ class MockResponse(httpx.Response):
content: Optional[Content] = None,
content_type: Optional[str] = None,
http_version: Optional[str] = None,
+ cookies: Optional[Union[CookieTypes, Sequence[SetCookie]]] = None,
**kwargs: Any,
) -> None:
if not isinstance(content, (str, bytes)) and (
@@ -110,6 +114,19 @@ class MockResponse(httpx.Response):
if content_type:
self.headers["Content-Type"] = content_type
+ if cookies:
+ if isinstance(cookies, dict):
+ cookies = tuple(cookies.items())
+ self.headers = httpx.Headers(
+ (
+ *self.headers.multi_items(),
+ *(
+ cookie if isinstance(cookie, SetCookie) else SetCookie(*cookie)
+ for cookie in cookies
+ ),
+ )
+ )
+
class Route:
def __init__(
@@ -256,6 +273,7 @@ class Route:
status_code: int = 200,
*,
headers: Optional[HeaderTypes] = None,
+ cookies: Optional[Union[CookieTypes, Sequence[SetCookie]]] = None,
content: Optional[Content] = None,
text: Optional[str] = None,
html: Optional[str] = None,
@@ -268,6 +286,7 @@ class Route:
response = MockResponse(
status_code,
headers=headers,
+ cookies=cookies,
content=content,
text=text,
html=html,
diff --git a/respx/utils.py b/respx/utils.py
index 434c30d..5a6ce3a 100644
--- a/respx/utils.py
+++ b/respx/utils.py
@@ -1,8 +1,14 @@
import email
+from datetime import datetime
from email.message import Message
-from typing import List, Tuple, cast
+from typing import Dict, List, NamedTuple, Optional, Tuple, Type, TypeVar, Union, cast
from urllib.parse import parse_qsl
+try:
+ from typing import Literal # type: ignore[attr-defined]
+except ImportError: # pragma: no cover
+ from typing_extensions import Literal
+
import httpx
@@ -71,3 +77,62 @@ def decode_data(request: httpx.Request) -> Tuple[MultiItems, MultiItems]:
files = MultiItems()
return data, files
+
+
+Self = TypeVar("Self", bound="SetCookie")
+
+
+class SetCookie(
+ NamedTuple(
+ "SetCookie",
+ [
+ ("header_name", Literal["Set-Cookie"]),
+ ("header_value", str),
+ ],
+ )
+):
+ def __new__(
+ cls: Type[Self],
+ name: str,
+ value: str,
+ *,
+ path: Optional[str] = None,
+ domain: Optional[str] = None,
+ expires: Optional[Union[str, datetime]] = None,
+ max_age: Optional[int] = None,
+ http_only: bool = False,
+ same_site: Optional[Literal["Strict", "Lax", "None"]] = None,
+ secure: bool = False,
+ partitioned: bool = False,
+ ) -> Self:
+ """
+ https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#syntax
+ """
+ attrs: Dict[str, Union[str, bool]] = {name: value}
+ if path is not None:
+ attrs["Path"] = path
+ if domain is not None:
+ attrs["Domain"] = domain
+ if expires is not None:
+ if isinstance(expires, datetime): # pragma: no branch
+ expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
+ attrs["Expires"] = expires
+ if max_age is not None:
+ attrs["Max-Age"] = str(max_age)
+ if http_only:
+ attrs["HttpOnly"] = True
+ if same_site is not None:
+ attrs["SameSite"] = same_site
+ if same_site == "None": # pragma: no branch
+ secure = True
+ if secure:
+ attrs["Secure"] = True
+ if partitioned:
+ attrs["Partitioned"] = True
+
+ string = "; ".join(
+ _name if _value is True else f"{_name}={_value}"
+ for _name, _value in attrs.items()
+ )
+ self = super().__new__(cls, "Set-Cookie", string)
+ return self
|
lundberg/respx
|
24ee4a90d3bd38be3ba4358bff50ce69f6578f82
|
diff --git a/tests/test_api.py b/tests/test_api.py
index ef1dddd..c4e0ff7 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -564,6 +564,46 @@ def test_respond():
route.respond(content=Exception()) # type: ignore[arg-type]
+def test_can_respond_with_cookies():
+ with respx.mock:
+ route = respx.get("https://foo.bar/").respond(
+ json={}, headers={"X-Foo": "bar"}, cookies={"foo": "bar", "ham": "spam"}
+ )
+ response = httpx.get("https://foo.bar/")
+ assert len(response.headers) == 5
+ assert response.headers["X-Foo"] == "bar", "mocked header is missing"
+ assert len(response.cookies) == 2
+ assert response.cookies["foo"] == "bar"
+ assert response.cookies["ham"] == "spam"
+
+ route.respond(cookies=[("egg", "yolk")])
+ response = httpx.get("https://foo.bar/")
+ assert len(response.cookies) == 1
+ assert response.cookies["egg"] == "yolk"
+
+ route.respond(
+ cookies=[respx.SetCookie("foo", "bar", path="/", same_site="Lax")]
+ )
+ response = httpx.get("https://foo.bar/")
+ assert len(response.cookies) == 1
+ assert response.cookies["foo"] == "bar"
+
+
+def test_can_mock_response_with_set_cookie_headers():
+ request = httpx.Request("GET", "https://example.com/")
+ response = httpx.Response(
+ 200,
+ headers=[
+ respx.SetCookie("foo", value="bar"),
+ respx.SetCookie("ham", value="spam"),
+ ],
+ request=request,
+ )
+ assert len(response.cookies) == 2
+ assert response.cookies["foo"] == "bar"
+ assert response.cookies["ham"] == "spam"
+
+
@pytest.mark.parametrize(
"kwargs",
[
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000..ea9c365
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,33 @@
+from datetime import datetime, timezone
+
+from respx.utils import SetCookie
+
+
+class TestSetCookie:
+ def test_can_render_all_attributes(self) -> None:
+ expires = datetime.fromtimestamp(0, tz=timezone.utc)
+ cookie = SetCookie(
+ "foo",
+ value="bar",
+ path="/",
+ domain=".example.com",
+ expires=expires,
+ max_age=44,
+ http_only=True,
+ same_site="None",
+ partitioned=True,
+ )
+ assert cookie == (
+ "Set-Cookie",
+ (
+ "foo=bar; "
+ "Path=/; "
+ "Domain=.example.com; "
+ "Expires=Thu, 01 Jan 1970 00:00:00 GMT; "
+ "Max-Age=44; "
+ "HttpOnly; "
+ "SameSite=None; "
+ "Secure; "
+ "Partitioned"
+ ),
+ )
|
Feature request: easily mock cookies
It would be super nice if we could do something like this:
```python
respx.get("example.com").respond(cookies={"foo": "bar"})
cookies = httpx.Cookies()
respx.get("example.com").respond(cookies=cookies)
```
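A minimal sketch (a hypothetical `set_cookie` helper, not respx's implementation details) of rendering a `("Set-Cookie", ...)` header tuple the way the `SetCookie` utility in the patch above does:
```python
from typing import Dict, Optional, Tuple, Union


def set_cookie(
    name: str,
    value: str,
    *,
    path: Optional[str] = None,
    max_age: Optional[int] = None,
    http_only: bool = False,
) -> Tuple[str, str]:
    attrs: Dict[str, Union[str, bool]] = {name: value}
    if path is not None:
        attrs["Path"] = path
    if max_age is not None:
        attrs["Max-Age"] = str(max_age)
    if http_only:
        # Boolean attributes render as a bare name, without a value.
        attrs["HttpOnly"] = True
    header_value = "; ".join(
        key if val is True else f"{key}={val}" for key, val in attrs.items()
    )
    return "Set-Cookie", header_value


print(set_cookie("foo", "bar", path="/", http_only=True))
# ('Set-Cookie', 'foo=bar; Path=/; HttpOnly')
```
Because the result is just a header tuple, it slots directly into `httpx.Response(200, headers=[...])` without any cookie-specific plumbing.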
|
0.0
|
24ee4a90d3bd38be3ba4358bff50ce69f6578f82
|
[
"tests/test_api.py::test_http_methods",
"tests/test_api.py::test_url_match[https://foo.bar-https://foo.bar]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-None]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-https://foo.bar/baz/]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-^https://foo.bar/\\\\w+/$]",
"tests/test_api.py::test_url_match[https://foo.bar/baz/-pattern5]",
"tests/test_api.py::test_url_match[https://foo.bar:443/baz/-pattern6]",
"tests/test_api.py::test_url_match[https://foo.bar/%08-https://foo.bar/%08]",
"tests/test_api.py::test_invalid_url_pattern",
"tests/test_api.py::test_repeated_pattern",
"tests/test_api.py::test_status_code",
"tests/test_api.py::test_headers[headers0-None-expected0]",
"tests/test_api.py::test_headers[headers1-None-expected1]",
"tests/test_api.py::test_headers[headers2-ham/spam-expected2]",
"tests/test_api.py::test_text_encoding[eldr\\xc3\\xa4v-eldr\\xe4v]",
"tests/test_api.py::test_text_encoding[\\xe4pple-\\xe4pple]",
"tests/test_api.py::test_text_encoding[Gehäusegr\\xf6\\xdfe-Gehäusegr\\xf6\\xdfe]",
"tests/test_api.py::test_content_variants[content-foobar-None0]",
"tests/test_api.py::test_content_variants[content-foobar-None1]",
"tests/test_api.py::test_content_variants[json-value2-application/json]",
"tests/test_api.py::test_content_variants[json-value3-application/json]",
"tests/test_api.py::test_content_variants[text-foobar-text/plain;",
"tests/test_api.py::test_content_variants[html-<strong>foobar</strong>-text/html;",
"tests/test_api.py::test_json_content[content0-headers0-expected_headers0]",
"tests/test_api.py::test_json_content[content1-headers1-expected_headers1]",
"tests/test_api.py::test_json_post_body",
"tests/test_api.py::test_data_post_body",
"tests/test_api.py::test_files_post_body",
"tests/test_api.py::test_raising_content",
"tests/test_api.py::test_callable_content",
"tests/test_api.py::test_request_callback",
"tests/test_api.py::test_pass_through[httpcore-route0-True]",
"tests/test_api.py::test_pass_through[httpx-route1-True]",
"tests/test_api.py::test_pass_through[httpcore-route2-False]",
"tests/test_api.py::test_pass_through[httpcore-route3-True]",
"tests/test_api.py::test_parallel_requests",
"tests/test_api.py::test_method_case[DELETE-delete]",
"tests/test_api.py::test_method_case[delete-delete]",
"tests/test_api.py::test_method_case[GET-get]",
"tests/test_api.py::test_method_case[get-get]",
"tests/test_api.py::test_method_case[HEAD-head]",
"tests/test_api.py::test_method_case[head-head]",
"tests/test_api.py::test_method_case[OPTIONS-options]",
"tests/test_api.py::test_method_case[options-options]",
"tests/test_api.py::test_method_case[PATCH-patch]",
"tests/test_api.py::test_method_case[patch-patch]",
"tests/test_api.py::test_method_case[POST-post]",
"tests/test_api.py::test_method_case[post-post]",
"tests/test_api.py::test_method_case[PUT-put]",
"tests/test_api.py::test_method_case[put-put]",
"tests/test_api.py::test_pop",
"tests/test_api.py::test_params_match[https://foo/-foo=bar-https://foo/-foo=bar0]",
"tests/test_api.py::test_params_match[https://foo/-foo=bar-https://foo/-foo=bar1]",
"tests/test_api.py::test_params_match[https://foo/-params2-https://foo/-call_params2]",
"tests/test_api.py::test_params_match[https://foo/-params3-https://foo/-call_params3]",
"tests/test_api.py::test_params_match[https://foo/-params4-https://foo/-call_params4]",
"tests/test_api.py::test_params_match[https://foo?foo=bar-baz=qux-https://foo?foo=bar-baz=qux]",
"tests/test_api.py::test_params_match[https://foo?foo=bar-baz=qux-https://foo?foo=bar&baz=qux-None]",
"tests/test_api.py::test_params_match[https://foo/(\\\\w+)/-foo=bar-https://foo/bar/-foo=bar]",
"tests/test_api.py::test_params_match[url8-foo=bar-https://foo/-foo=bar]",
"tests/test_api.py::test_params_match[url9-baz=qux-https://foo?foo=bar&baz=qux-None]",
"tests/test_api.py::test_build_url_base[None-https://foo.bar/baz/]",
"tests/test_api.py::test_build_url_base[-https://foo.bar/baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar-baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-/baz/]",
"tests/test_api.py::test_build_url_base[https://foo.bar/baz/-None]",
"tests/test_api.py::test_build_url_base[https://foo.bar/-/(\\\\w+)/]",
"tests/test_api.py::test_add",
"tests/test_api.py::test_respond",
"tests/test_api.py::test_can_respond_with_cookies",
"tests/test_api.py::test_can_mock_response_with_set_cookie_headers",
"tests/test_api.py::test_async_post_content[kwargs0]",
"tests/test_api.py::test_async_post_content[kwargs1]",
"tests/test_api.py::test_async_post_content[kwargs2]",
"tests/test_api.py::test_async_post_content[kwargs3]",
"tests/test_api.py::test_async_post_content[kwargs4]",
"tests/test_utils.py::TestSetCookie::test_can_render_all_attributes"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-18 15:46:37+00:00
|
bsd-3-clause
| 3,652 |
|
lundberg__respx-259
|
diff --git a/respx/patterns.py b/respx/patterns.py
index d8b00b5..8d80148 100644
--- a/respx/patterns.py
+++ b/respx/patterns.py
@@ -548,7 +548,9 @@ class Data(MultiItemsMixin, Pattern):
value: MultiItems
def clean(self, value: Dict) -> MultiItems:
- return MultiItems(value)
+ return MultiItems(
+ (key, "" if value is None else str(value)) for key, value in value.items()
+ )
def parse(self, request: httpx.Request) -> Any:
data, _ = decode_data(request)
diff --git a/respx/utils.py b/respx/utils.py
index 5a6ce3a..5eb4715 100644
--- a/respx/utils.py
+++ b/respx/utils.py
@@ -1,7 +1,18 @@
import email
from datetime import datetime
from email.message import Message
-from typing import Dict, List, NamedTuple, Optional, Tuple, Type, TypeVar, Union, cast
+from typing import (
+ Any,
+ Dict,
+ List,
+ NamedTuple,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ cast,
+)
from urllib.parse import parse_qsl
try:
@@ -13,13 +24,13 @@ import httpx
class MultiItems(dict):
- def get_list(self, key: str) -> List[str]:
+ def get_list(self, key: str) -> List[Any]:
try:
return [self[key]]
except KeyError: # pragma: no cover
return []
- def multi_items(self) -> List[Tuple[str, str]]:
+ def multi_items(self) -> List[Tuple[str, Any]]:
return list(self.items())
|
lundberg/respx
|
15522db36d6a08f2c062831ec664df2b9d2e1f69
|
diff --git a/tests/test_patterns.py b/tests/test_patterns.py
index dda7914..4b119aa 100644
--- a/tests/test_patterns.py
+++ b/tests/test_patterns.py
@@ -350,6 +350,18 @@ def test_content_pattern(lookup, content, expected):
None,
True,
),
+ (
+ Lookup.EQUAL,
+ {"none_value": None},
+ None,
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"non_str": 123},
+ None,
+ True,
+ ),
(
Lookup.EQUAL,
{"x": "a"},
|
None type doesn't work in data matching as of v0.21.0
As of the latest version, `v0.21.0`, `None` values in `data` lookups don't work:
```python
import httpx
import respx
@respx.mock
def test():
data = {"test": None}
respx.post("http://test.com", data=data).respond()
response = httpx.post("http://test.com", data=data)
assert response.status_code == 200
if __name__ == "__main__":
test()
```
output:
```python
respx.models.AllMockedAssertionError: RESPX: <Request('POST', 'http://test.com/')> not mocked!
```
This test passes successfully in `v0.20.2`
The following _do_ work:
```python
data = {"test": ""}
respx.post("http://test.com", data=data).respond()
response = httpx.post("http://test.com", data=data)
```
```python
respx.post("http://test.com", data={"test": ""}).respond()
response = httpx.post("http://test.com", data={"test": None})
```
So there's a bug somewhere in how `None` gets translated to `""` during data matching.
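For context, a minimal sketch of the normalization the merged patch applies in the `Data` pattern's `clean` step, extracted here as a hypothetical standalone helper: `None` is coerced to the empty string that actually ends up on the wire, and other non-string values are stringified before comparison.
```python
from typing import Any, Dict

def clean_data(value: Dict[str, Any]) -> Dict[str, str]:
    # Coerce values before matching: None becomes "", everything
    # else is stringified, mirroring the patched Data.clean above.
    return {key: "" if item is None else str(item) for key, item in value.items()}

assert clean_data({"test": None}) == {"test": ""}
assert clean_data({"non_str": 123}) == {"non_str": "123"}
```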
|
0.0
|
15522db36d6a08f2c062831ec664df2b9d2e1f69
|
[
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data4-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data5-None-True]"
] |
[
"tests/test_patterns.py::test_bitwise_and",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://ham.spam/-True]",
"tests/test_patterns.py::test_bitwise_operators[PATCH-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[PUT-https://foo.bar/-False]",
"tests/test_patterns.py::test_match_context",
"tests/test_patterns.py::test_noop_pattern",
"tests/test_patterns.py::test_m_pattern[kwargs0-https://foo.bar/-True]",
"tests/test_patterns.py::test_m_pattern[kwargs1-https://foo.bar/?x=y-False]",
"tests/test_patterns.py::test_m_pattern[kwargs2-https://foo.bar/?x=y-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-GET-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-get-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-POST-False]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value3-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value4-False]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers0-request_headers0-True]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers1--False]",
"tests/test_patterns.py::test_headers_pattern_hash",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies0-request_cookies0-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies1-request_cookies1-False]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies2-request_cookies2-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies3-request_cookies3-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies4-request_cookies4-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies5-None-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies6-request_cookies6-False]",
"tests/test_patterns.py::test_cookies_pattern__hash",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-https-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-HTTPS-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-http-False]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.IN-scheme3-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-foo.bar-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-ham.spam-False]",
"tests/test_patterns.py::test_host_pattern[Lookup.REGEX-.+\\\\.bar-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-443-https://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-https://foo.bar/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar:8080/baz/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-22-//foo.bar:22/baz/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-None-//foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port7-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port8-https://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port9-https://foo.bar:8080/-False]",
"tests/test_patterns.py::test_path_pattern",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS--https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=-https://foo.bar/?x=-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params5-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params6-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params7-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params8-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params9-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params10-https://foo.bar/?x=1&x=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params11-https://foo.bar/?x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL--https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x-https://foo.bar/?x-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=-https://foo.bar/?x=-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params18-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params19-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params20-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params21-https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2&x=1-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern_hash",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https?://a.b/(?P<c>\\\\w+)/-context0-http://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-^https://a.b/.+$-context1-https://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https://a.b/c/-context2-https://x.y/c/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/c/-context3-https://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/x/-context4-https://a.b/c/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b?x=y-context5-https://a.b/?x=y-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/?x=y-context6-https://a.b?x=y-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-https://a.b/b-context7-https://a.b/baz/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-http://a.b/baz/-context8-https://a.b/baz/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-value9-context9-https://[FE80::1]-True]",
"tests/test_patterns.py::test_url_pattern_invalid",
"tests/test_patterns.py::test_url_pattern_hash",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True0]",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True1]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-bar-True0]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-bar-True1]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-baz-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data0-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data1-request_data1-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data2-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data3-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data6-request_data6-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data7-request_data7-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.CONTAINS-data8-request_data8-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files0-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files1-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files2-request_files2-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files3-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files4-request_files4-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files5-request_files5-False]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files6-request_files6-False]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files7-request_files7-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files8-request_files8-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files9-request_files9-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files10-request_files10-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value0-json0-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value1-json1-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value2-json2-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value3-json3-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-json-string-json-string-True]",
"tests/test_patterns.py::test_json_pattern_path[json0-foo__bar-baz-True]",
"tests/test_patterns.py::test_json_pattern_path[json1-x-value1-True]",
"tests/test_patterns.py::test_json_pattern_path[json2-ham__1__egg-yolk-True]",
"tests/test_patterns.py::test_json_pattern_path[json3-0__name-jonas-True]",
"tests/test_patterns.py::test_json_pattern_path[json4-pk-123-True]",
"tests/test_patterns.py::test_json_pattern_path[json5-foo__ham-spam-False]",
"tests/test_patterns.py::test_json_pattern_path[json6-1__name-lundberg-False]",
"tests/test_patterns.py::test_invalid_pattern",
"tests/test_patterns.py::test_iter_pattern",
"tests/test_patterns.py::test_parse_url_patterns",
"tests/test_patterns.py::test_merge_patterns",
"tests/test_patterns.py::test_unique_pattern_key"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-26 11:10:47+00:00
|
bsd-3-clause
| 3,653 |
|
lundberg__respx-260
|
diff --git a/respx/patterns.py b/respx/patterns.py
index 8d80148..da2022a 100644
--- a/respx/patterns.py
+++ b/respx/patterns.py
@@ -1,3 +1,4 @@
+import io
import json as jsonlib
import operator
import pathlib
@@ -562,7 +563,7 @@ class Files(MultiItemsMixin, Pattern):
key = "files"
value: MultiItems
- def _normalize_file_value(self, value: FileTypes) -> Tuple[Any, ...]:
+ def _normalize_file_value(self, value: FileTypes) -> Tuple[Any, Any]:
# Mimic httpx `FileField` to normalize `files` kwarg to shortest tuple style
if isinstance(value, tuple):
filename, fileobj = value[:2]
@@ -573,6 +574,12 @@ class Files(MultiItemsMixin, Pattern):
filename = ANY
fileobj = value
+ # Normalize file-like objects and strings to bytes to allow equality check
+ if isinstance(fileobj, io.BytesIO):
+ fileobj = fileobj.read()
+ elif isinstance(fileobj, str):
+ fileobj = fileobj.encode()
+
return filename, fileobj
def clean(self, value: RequestFiles) -> MultiItems:
|
lundberg/respx
|
de7a983ca141ef04bf93d5ddba3c9100d9d57eda
|
diff --git a/tests/test_patterns.py b/tests/test_patterns.py
index 4b119aa..451b0dd 100644
--- a/tests/test_patterns.py
+++ b/tests/test_patterns.py
@@ -1,3 +1,4 @@
+import io
import re
from unittest.mock import ANY
@@ -456,6 +457,18 @@ def test_data_pattern(lookup, data, request_data, expected):
},
False,
),
+ (
+ Lookup.EQUAL,
+ {"file_1": ("filename.png", io.BytesIO(b"some..image..data"), "image/png")},
+ None,
+ True,
+ ),
+ (
+ Lookup.EQUAL,
+ {"file_1": ("filename.png", "some..image..data", "image/png")}, # str data
+ {"file_1": ("filename.png", io.BytesIO(b"some..image..data"), "image/png")},
+ True,
+ ),
(
Lookup.CONTAINS,
{
@@ -487,6 +500,15 @@ def test_data_pattern(lookup, data, request_data, expected):
},
True,
),
+ (
+ Lookup.CONTAINS,
+ {"file_1": "foo..."}, # str data
+ {
+ "file_1": ("filename_1.txt", io.BytesIO(b"foo...")),
+ "file_2": ("filename_2.txt", io.BytesIO(b"bar...")),
+ },
+ True,
+ ),
(
Lookup.CONTAINS,
[("file_1", b"ham...")],
|
`files` pattern not handling `str` and `io.BytesIO`
When using the `files` parameter in a respx helper (HTTP method) function, payloads passed as `str` or `io.BytesIO` are not deemed equal to the actual request content, so the request is not matched and an error is raised.
# Input
## `str` payload
```py
@respx.mock
def test_file_str_payload():
FILE_DATA = {"upload": ("image/png", "str", "image/png")}
respx.patch("https://api.example.com/endpoint/123", files=FILE_DATA)
httpx.patch("https://api.example.com/endpoint/123", files=FILE_DATA)
```
## `io.BytesIO` payload
```py
@respx.mock
def test_file_bytesio_payload():
FILE_DATA = {
"upload": ("image/png", io.BytesIO(b"some image content"), "image/png")
}
respx.patch("https://api.example.com/endpoint/123", files=FILE_DATA)
httpx.patch("https://api.example.com/endpoint/123", files=FILE_DATA)
```
# Output
## Error raised
```bash
raise AllMockedAssertionError(f"RESPX: {request!r} not mocked!")
respx.models.AllMockedAssertionError: RESPX: <Request('PATCH', 'https://api.example.com/endpoint/123')> not mocked!
```
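For context, a minimal sketch of the normalization the merged patch adds to `Files._normalize_file_value`, extracted here as a hypothetical standalone helper: file-like objects are read and strings are encoded so both sides of the equality check end up as `bytes`.
```python
import io
from typing import Union

def normalize_fileobj(fileobj: Union[bytes, str, io.BytesIO]) -> bytes:
    # Normalize file-like objects and strings to bytes so two
    # payloads with the same content compare equal.
    if isinstance(fileobj, io.BytesIO):
        return fileobj.read()
    if isinstance(fileobj, str):
        return fileobj.encode()
    return fileobj

assert normalize_fileobj(io.BytesIO(b"some image content")) == b"some image content"
assert normalize_fileobj("some image content") == b"some image content"
```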
|
0.0
|
de7a983ca141ef04bf93d5ddba3c9100d9d57eda
|
[
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files7-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files8-request_files8-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files12-request_files12-True]"
] |
[
"tests/test_patterns.py::test_bitwise_and",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[GET-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[POST-https://ham.spam/-True]",
"tests/test_patterns.py::test_bitwise_operators[PATCH-https://foo.bar/-True]",
"tests/test_patterns.py::test_bitwise_operators[PUT-https://foo.bar/-False]",
"tests/test_patterns.py::test_match_context",
"tests/test_patterns.py::test_noop_pattern",
"tests/test_patterns.py::test_m_pattern[kwargs0-https://foo.bar/-True]",
"tests/test_patterns.py::test_m_pattern[kwargs1-https://foo.bar/?x=y-False]",
"tests/test_patterns.py::test_m_pattern[kwargs2-https://foo.bar/?x=y-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-GET-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-get-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.EQUAL-POST-False]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value3-True]",
"tests/test_patterns.py::test_method_pattern[Lookup.IN-value4-False]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers0-request_headers0-True]",
"tests/test_patterns.py::test_headers_pattern[Lookup.CONTAINS-headers1--False]",
"tests/test_patterns.py::test_headers_pattern_hash",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies0-request_cookies0-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.CONTAINS-cookies1-request_cookies1-False]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies2-request_cookies2-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies3-request_cookies3-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies4-request_cookies4-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies5-None-True]",
"tests/test_patterns.py::test_cookies_pattern[Lookup.EQUAL-cookies6-request_cookies6-False]",
"tests/test_patterns.py::test_cookies_pattern__hash",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-https-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-HTTPS-True]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.EQUAL-http-False]",
"tests/test_patterns.py::test_scheme_pattern[Lookup.IN-scheme3-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-foo.bar-True]",
"tests/test_patterns.py::test_host_pattern[Lookup.EQUAL-ham.spam-False]",
"tests/test_patterns.py::test_host_pattern[Lookup.REGEX-.+\\\\.bar-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-443-https://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-https://foo.bar/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-80-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar:8080/baz/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-8080-https://foo.bar/baz/-False]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-22-//foo.bar:22/baz/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.EQUAL-None-//foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port7-http://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port8-https://foo.bar/-True]",
"tests/test_patterns.py::test_port_pattern[Lookup.IN-port9-https://foo.bar:8080/-False]",
"tests/test_patterns.py::test_path_pattern",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS--https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=-https://foo.bar/?x=-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params5-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params6-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params7-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params8-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params9-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params10-https://foo.bar/?x=1&x=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-params11-https://foo.bar/?x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL--https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x-https://foo.bar/?x-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=-https://foo.bar/?x=-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params18-https://foo.bar/?x=1-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params19-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params20-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-params21-https://foo.bar/-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-x=1&y=2-https://foo.bar/?x=1-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=2&x=1-https://foo.bar/?x=1&y=2-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.EQUAL-y=3&x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=2&x=1-https://foo.bar/?x=1&x=2&y=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&x=3-False]",
"tests/test_patterns.py::test_params_pattern[Lookup.CONTAINS-x=1&x=2-https://foo.bar/?x=1&x=2&y=3-True]",
"tests/test_patterns.py::test_params_pattern_hash",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https?://a.b/(?P<c>\\\\w+)/-context0-http://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-^https://a.b/.+$-context1-https://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.REGEX-https://a.b/c/-context2-https://x.y/c/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/c/-context3-https://a.b/c/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/x/-context4-https://a.b/c/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b?x=y-context5-https://a.b/?x=y-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-https://a.b/?x=y-context6-https://a.b?x=y-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-https://a.b/b-context7-https://a.b/baz/-True]",
"tests/test_patterns.py::test_url_pattern[Lookup.STARTS_WITH-http://a.b/baz/-context8-https://a.b/baz/-False]",
"tests/test_patterns.py::test_url_pattern[Lookup.EQUAL-value9-context9-https://[FE80::1]-True]",
"tests/test_patterns.py::test_url_pattern_invalid",
"tests/test_patterns.py::test_url_pattern_hash",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True0]",
"tests/test_patterns.py::test_content_pattern[Lookup.EQUAL-foobar-True1]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-bar-True0]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-bar-True1]",
"tests/test_patterns.py::test_content_pattern[Lookup.CONTAINS-baz-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data0-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data1-request_data1-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data2-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data3-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data4-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data5-None-True]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data6-request_data6-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.EQUAL-data7-request_data7-False]",
"tests/test_patterns.py::test_data_pattern[Lookup.CONTAINS-data8-request_data8-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files0-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files1-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files2-request_files2-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files3-None-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files4-request_files4-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files5-request_files5-False]",
"tests/test_patterns.py::test_files_pattern[Lookup.EQUAL-files6-request_files6-False]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files9-request_files9-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files10-request_files10-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files11-request_files11-True]",
"tests/test_patterns.py::test_files_pattern[Lookup.CONTAINS-files13-request_files13-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value0-json0-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value1-json1-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value2-json2-True]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-value3-json3-False]",
"tests/test_patterns.py::test_json_pattern[Lookup.EQUAL-json-string-json-string-True]",
"tests/test_patterns.py::test_json_pattern_path[json0-foo__bar-baz-True]",
"tests/test_patterns.py::test_json_pattern_path[json1-x-value1-True]",
"tests/test_patterns.py::test_json_pattern_path[json2-ham__1__egg-yolk-True]",
"tests/test_patterns.py::test_json_pattern_path[json3-0__name-jonas-True]",
"tests/test_patterns.py::test_json_pattern_path[json4-pk-123-True]",
"tests/test_patterns.py::test_json_pattern_path[json5-foo__ham-spam-False]",
"tests/test_patterns.py::test_json_pattern_path[json6-1__name-lundberg-False]",
"tests/test_patterns.py::test_invalid_pattern",
"tests/test_patterns.py::test_iter_pattern",
"tests/test_patterns.py::test_parse_url_patterns",
"tests/test_patterns.py::test_merge_patterns",
"tests/test_patterns.py::test_unique_pattern_key"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-26 17:38:49+00:00
|
bsd-3-clause
| 3,654 |
|
lyft__cartography-680
|
diff --git a/cartography/graph/job.py b/cartography/graph/job.py
index f1684ab..73afb1c 100644
--- a/cartography/graph/job.py
+++ b/cartography/graph/job.py
@@ -1,8 +1,15 @@
import json
import logging
+from pathlib import Path
+from typing import Dict
+from typing import List
+import neo4j
+
+from cartography.graph.statement import get_job_shortname
from cartography.graph.statement import GraphStatement
+
logger = logging.getLogger(__name__)
@@ -24,18 +31,21 @@ class GraphJob:
A job that will run against the cartography graph. A job is a sequence of statements which execute sequentially.
"""
- def __init__(self, name, statements):
+ def __init__(self, name: str, statements: List[GraphStatement], short_name: str = None):
+ # E.g. "Okta intel module cleanup"
self.name = name
- self.statements = statements
+ self.statements: List[GraphStatement] = statements
+ # E.g. "okta_import_cleanup"
+ self.short_name = short_name
- def merge_parameters(self, parameters):
+ def merge_parameters(self, parameters: Dict) -> None:
"""
Merge parameters for all job statements.
"""
for s in self.statements:
s.merge_parameters(parameters)
- def run(self, neo4j_session):
+ def run(self, neo4j_session: neo4j.Session):
"""
Run the job. This will execute all statements sequentially.
"""
@@ -50,52 +60,58 @@ class GraphJob:
e,
)
raise
- logger.debug("Finished job '%s'.", self.name)
+ log_msg = f"Finished job {self.short_name}" if self.short_name else f"Finished job {self.name}"
+ logger.info(log_msg)
- def as_dict(self):
+ def as_dict(self) -> Dict:
"""
Convert job to a dictionary.
"""
return {
"name": self.name,
"statements": [s.as_dict() for s in self.statements],
+ "short_name": self.short_name,
}
@classmethod
- def from_json(cls, blob):
+ def from_json(cls, blob: str, short_name: str = None):
"""
Create a job from a JSON blob.
"""
- data = json.loads(blob)
+ data: Dict = json.loads(blob)
statements = _get_statements_from_json(data)
name = data["name"]
- return cls(name, statements)
+ return cls(name, statements, short_name)
@classmethod
- def from_json_file(cls, file_path):
+ def from_json_file(cls, file_path: Path):
"""
Create a job from a JSON file.
"""
with open(file_path) as j_file:
- data = json.load(j_file)
- statements = _get_statements_from_json(data)
- name = data["name"]
- return cls(name, statements)
+ data: Dict = json.load(j_file)
+
+ job_shortname: str = get_job_shortname(file_path)
+ statements: List[GraphStatement] = _get_statements_from_json(data, job_shortname)
+ name: str = data["name"]
+ return cls(name, statements, job_shortname)
@classmethod
- def run_from_json(cls, neo4j_session, blob, parameters=None):
+ def run_from_json(
+ cls, neo4j_session: neo4j.Session, blob: str, parameters: Dict = None, short_name: str = None,
+ ) -> None:
"""
Run a job from a JSON blob. This will deserialize the job and execute all statements sequentially.
"""
if not parameters:
parameters = {}
- job = cls.from_json(blob)
+ job: GraphJob = cls.from_json(blob, short_name)
job.merge_parameters(parameters)
job.run(neo4j_session)
@classmethod
- def run_from_json_file(cls, file_path, neo4j_session, parameters=None):
+ def run_from_json_file(cls, file_path: Path, neo4j_session: neo4j.Session, parameters: Dict = None) -> None:
"""
Run a job from a JSON file. This will deserialize the job and execute all statements sequentially.
"""
@@ -103,17 +119,18 @@ class GraphJob:
parameters = {}
job = cls.from_json_file(file_path)
+
job.merge_parameters(parameters)
job.run(neo4j_session)
-def _get_statements_from_json(blob):
+def _get_statements_from_json(blob: Dict, short_job_name: str = None) -> List[GraphStatement]:
"""
Deserialize all statements from the JSON blob.
"""
- statements = []
- for statement_data in blob["statements"]:
- statement = GraphStatement.create_from_json(statement_data)
+ statements: List[GraphStatement] = []
+ for i, statement_data in enumerate(blob["statements"]):
+ statement: GraphStatement = GraphStatement.create_from_json(statement_data, short_job_name, i)
statements.append(statement)
return statements
diff --git a/cartography/graph/statement.py b/cartography/graph/statement.py
index fc5e5ba..9c83e67 100644
--- a/cartography/graph/statement.py
+++ b/cartography/graph/statement.py
@@ -1,9 +1,17 @@
import json
import logging
+import os
+from pathlib import Path
+from typing import Dict
+from typing import Union
import neo4j
+from cartography.stats import get_stats_client
+
+
logger = logging.getLogger(__name__)
+stat_handler = get_stats_client(__name__)
class GraphStatementJSONEncoder(json.JSONEncoder):
@@ -19,20 +27,32 @@ class GraphStatementJSONEncoder(json.JSONEncoder):
return json.JSONEncoder.default(self, obj)
+# TODO move this cartography.util after we move util.run_*_job to cartography.graph.job.
+def get_job_shortname(file_path: Union[Path, str]) -> str:
+ # Return filename without path and extension
+ return os.path.splitext(file_path)[0]
+
+
class GraphStatement:
"""
A statement that will run against the cartography graph. Statements can query or update the graph.
"""
- def __init__(self, query, parameters=None, iterative=False, iterationsize=0):
+ def __init__(
+ self, query: str, parameters: Dict = None, iterative: bool = False, iterationsize: int = 0,
+ parent_job_name: str = None, parent_job_sequence_num: int = None,
+ ):
self.query = query
- self.parameters = parameters
+ self.parameters: Dict = parameters
if not parameters:
self.parameters = {}
self.iterative = iterative
self.iterationsize = iterationsize
self.parameters["LIMIT_SIZE"] = self.iterationsize
+ self.parent_job_name = parent_job_name if parent_job_name else None
+ self.parent_job_sequence_num = parent_job_sequence_num if parent_job_sequence_num else None
+
def merge_parameters(self, parameters):
"""
Merge given parameters with existing parameters.
@@ -41,17 +61,15 @@ class GraphStatement:
tmp.update(parameters)
self.parameters = tmp
- def run(self, session) -> None:
+ def run(self, session: neo4j.Session) -> None:
"""
Run the statement. This will execute the query against the graph.
"""
- tx: neo4j.Transaction = session.begin_transaction()
if self.iterative:
- self._run_iterative(tx)
+ self._run_iterative(session)
else:
- data: neo4j.StatementResult = self._run(tx)
- data.consume()
- tx.commit()
+ session.write_transaction(self._run_noniterative).consume()
+ logger.info(f"Completed {self.parent_job_name} statement #{self.parent_job_sequence_num}")
def as_dict(self):
"""
@@ -64,13 +82,32 @@ class GraphStatement:
"iterationsize": self.iterationsize,
}
- def _run(self, tx: neo4j.Transaction) -> neo4j.StatementResult:
+ def _run_noniterative(self, tx: neo4j.Transaction) -> neo4j.StatementResult:
"""
Non-iterative statement execution.
"""
- return tx.run(self.query, self.parameters)
+ result: neo4j.StatementResult = tx.run(self.query, self.parameters)
+
+ # Handle stats
+ summary: neo4j.BoltStatementResultSummary = result.summary()
+ objects_changed: int = (
+ summary.counters.constraints_added +
+ summary.counters.constraints_removed +
+ summary.counters.indexes_added +
+ summary.counters.indexes_removed +
+ summary.counters.labels_added +
+ summary.counters.labels_removed +
+ summary.counters.nodes_created +
+ summary.counters.nodes_deleted +
+ summary.counters.properties_set +
+ summary.counters.relationships_created +
+ summary.counters.relationships_deleted
+ )
+ stat_handler.incr(f'{self.parent_job_name}-{self.parent_job_sequence_num}-objects_changed', objects_changed)
+
+ return result
- def _run_iterative(self, tx: neo4j.Transaction) -> None:
+ def _run_iterative(self, session: neo4j.Session) -> None:
"""
Iterative statement execution.
@@ -79,20 +116,17 @@ class GraphStatement:
self.parameters["LIMIT_SIZE"] = self.iterationsize
while True:
- result: neo4j.StatementResult = self._run(tx)
- record: neo4j.Record = result.single()
-
- # TODO: use the BoltStatementResultSummary object to determine the number of items processed
- total_completed = int(record['TotalCompleted'])
- logger.debug("Processed %d items", total_completed)
+ result: neo4j.StatementResult = session.write_transaction(self._run_noniterative)
- # Ensure network buffers are cleared
- result.consume()
- if total_completed == 0:
+ # Exit if we have finished processing all items
+ if not result.summary().counters.contains_updates:
+ # Ensure network buffers are cleared
+ result.consume()
break
+ result.consume()
@classmethod
- def create_from_json(cls, json_obj):
+ def create_from_json(cls, json_obj: Dict, short_job_name: str = None, job_sequence_num: int = None):
"""
Create a statement from a JSON blob.
"""
@@ -101,14 +135,16 @@ class GraphStatement:
json_obj.get("parameters", {}),
json_obj.get("iterative", False),
json_obj.get("iterationsize", 0),
+ short_job_name,
+ job_sequence_num,
)
@classmethod
- def create_from_json_file(cls, file_path):
+ def create_from_json_file(cls, file_path: Path):
"""
Create a statement from a JSON file.
"""
with open(file_path) as json_file:
data = json.load(json_file)
- return cls.create_from_json(data)
+ return cls.create_from_json(data, get_job_shortname(file_path))
diff --git a/cartography/util.py b/cartography/util.py
index 4fdb3f6..cd8fdc7 100644
--- a/cartography/util.py
+++ b/cartography/util.py
@@ -5,8 +5,10 @@ from typing import Dict
from typing import Optional
import botocore
+import neo4j
from cartography.graph.job import GraphJob
+from cartography.graph.statement import get_job_shortname
from cartography.stats import get_stats_client
if sys.version_info >= (3, 7):
@@ -25,10 +27,14 @@ def run_analysis_job(filename, neo4j_session, common_job_parameters, package='ca
filename,
),
common_job_parameters,
+ get_job_shortname(filename),
)
-def run_cleanup_job(filename, neo4j_session, common_job_parameters, package='cartography.data.jobs.cleanup'):
+def run_cleanup_job(
+ filename: str, neo4j_session: neo4j.Session, common_job_parameters: Dict,
+ package: str = 'cartography.data.jobs.cleanup',
+) -> None:
GraphJob.run_from_json(
neo4j_session,
read_text(
@@ -36,6 +42,7 @@ def run_cleanup_job(filename, neo4j_session, common_job_parameters, package='car
filename,
),
common_job_parameters,
+ get_job_shortname(filename),
)
|
lyft/cartography
|
694d6f572763ab22abf141e5b3480dfb05368757
|
diff --git a/tests/data/jobs/__init__.py b/tests/data/jobs/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/data/jobs/sample.py b/tests/data/jobs/sample.py
new file mode 100644
index 0000000..1d4e334
--- /dev/null
+++ b/tests/data/jobs/sample.py
@@ -0,0 +1,19 @@
+SAMPLE_CLEANUP_JOB = """
+{
+ "statements": [
+ {
+ "query": "MATCH(:TypeA)-[r:REL]->(:TypeB) WHERE r.lastupdated <> {UPDATE_TAG} WITH r LIMIT {LIMIT_SIZE} DELETE r",
+ "iterative": true,
+ "iterationsize": 100
+ },{
+ "query": "MATCH (n:TypeA) WHERE n.lastupdated <> {UPDATE_TAG} WITH n LIMIT {LIMIT_SIZE} DETACH DELETE (n)",
+ "iterative": true,
+ "iterationsize": 100
+ },{
+ "query": "MATCH (n:TypeB) WHERE n.lastupdated <> {UPDATE_TAG} WITH n LIMIT {LIMIT_SIZE} DETACH DELETE (n)",
+ "iterative": true,
+ "iterationsize": 100
+ }],
+ "name": "cleanup stale resources"
+}
+"""
diff --git a/tests/integration/cartography/data/jobs/test_cleanup_jobs.py b/tests/integration/cartography/data/jobs/test_cleanup_jobs.py
new file mode 100644
index 0000000..d20fbfc
--- /dev/null
+++ b/tests/integration/cartography/data/jobs/test_cleanup_jobs.py
@@ -0,0 +1,133 @@
+from unittest import mock
+
+import cartography.util
+from cartography.util import run_cleanup_job
+
+
+UPDATE_TAG_T1 = 111111
+UPDATE_TAG_T2 = 222222
+UPDATE_TAG_T3 = 333333
+UPDATE_TAG_T4 = 444444
+
+
+SAMPLE_CLEANUP_JOB = """
+{
+ "statements": [
+ {
+ "query": "MATCH(:TypeA)-[r:REL]->(:TypeB) WHERE r.lastupdated <> {UPDATE_TAG} WITH r LIMIT {LIMIT_SIZE} DELETE r",
+ "iterative": true,
+ "iterationsize": 100
+ },{
+ "query": "MATCH (n:TypeA) WHERE n.lastupdated <> {UPDATE_TAG} WITH n LIMIT {LIMIT_SIZE} DETACH DELETE (n)",
+ "iterative": true,
+ "iterationsize": 100
+ },{
+ "query": "MATCH (n:TypeB) WHERE n.lastupdated <> {UPDATE_TAG} WITH n LIMIT {LIMIT_SIZE} DETACH DELETE (n)",
+ "iterative": true,
+ "iterationsize": 100
+ }],
+ "name": "cleanup stale resources"
+}
+"""
+
+SAMPLE_JOB_FILENAME = '/path/to/this/cleanupjob/mycleanupjob.json'
+
+
[email protected](cartography.util, 'read_text', return_value=SAMPLE_CLEANUP_JOB)
+def test_run_cleanup_job_on_relationships(mock_read_text: mock.MagicMock, neo4j_session):
+ # Arrange: nodes id1 and id2 are connected to each other at time T2 via stale RELship r
+ neo4j_session.run(
+ """
+ MERGE (a:TypeA{id:"id1", lastupdated:{UPDATE_TAG_T2}})-[r:REL{lastupdated:{UPDATE_TAG_T1}}]->
+ (b:TypeB{id:"id2", lastupdated:{UPDATE_TAG_T2}})
+ """,
+ UPDATE_TAG_T1=UPDATE_TAG_T1,
+ UPDATE_TAG_T2=UPDATE_TAG_T2,
+ )
+
+ # Act: delete all nodes and rels where `lastupdated` != UPDATE_TAG_T2
+ job_parameters = {'UPDATE_TAG': UPDATE_TAG_T2}
+ run_cleanup_job(SAMPLE_JOB_FILENAME, neo4j_session, job_parameters)
+
+ # Assert 1: Node id1 is no longer attached to Node id2
+ nodes = neo4j_session.run(
+ """
+ MATCH (a:TypeA)
+ OPTIONAL MATCH (a)-[r:REL]->(b:TypeB)
+ RETURN a.id, r.lastupdated, b.id
+ """,
+ )
+ actual_nodes = {(n['a.id'], n['r.lastupdated'], n['b.id']) for n in nodes}
+ expected_nodes = {
+ ('id1', None, None),
+ }
+ assert actual_nodes == expected_nodes
+
+ # Assert 2: Node id2 still exists
+ nodes = neo4j_session.run(
+ """
+ MATCH (b:TypeB) RETURN b.id, b.lastupdated
+ """,
+ )
+ actual_nodes = {(n['b.id'], n['b.lastupdated']) for n in nodes}
+ expected_nodes = {
+ ('id2', UPDATE_TAG_T2),
+ }
+ assert actual_nodes == expected_nodes
+ mock_read_text.assert_called_once()
+
+
[email protected](cartography.util, 'read_text', return_value=SAMPLE_CLEANUP_JOB)
+def test_run_cleanup_job_on_nodes(mock_read_text: mock.MagicMock, neo4j_session):
+ # Arrange: we are now at time T3, and node id1 exists but node id2 no longer exists
+ neo4j_session.run(
+ """
+ MATCH (a:TypeA{id:"id1"}) SET a.lastupdated={UPDATE_TAG_T3}
+ """,
+ UPDATE_TAG_T3=UPDATE_TAG_T3,
+ )
+
+ # Act: delete all nodes and rels where `lastupdated` != UPDATE_TAG_T3
+ job_parameters = {'UPDATE_TAG': UPDATE_TAG_T3}
+ run_cleanup_job(SAMPLE_JOB_FILENAME, neo4j_session, job_parameters)
+
+ # Assert: Node id1 is the only node that still exists
+ nodes = neo4j_session.run(
+ """
+ MATCH (n) RETURN n.id, n.lastupdated
+ """,
+ )
+ actual_nodes = {(n['n.id'], n['n.lastupdated']) for n in nodes}
+ expected_nodes = {
+ ('id1', UPDATE_TAG_T3),
+ }
+ assert actual_nodes == expected_nodes
+ mock_read_text.assert_called_once()
+
+
[email protected](cartography.util, 'read_text', return_value=SAMPLE_CLEANUP_JOB)
+def test_run_cleanup_job_iterative_multiple_batches(mock_read_text: mock.MagicMock, neo4j_session):
+ # Arrange: load 300 nodes to the graph
+ for i in range(300):
+ neo4j_session.run(
+ """
+ MATCH (a:TypeA{id:{Id}}) SET a.lastupdated={UPDATE_TAG_T3}
+ """,
+ Id=i,
+ UPDATE_TAG_T3=UPDATE_TAG_T3,
+ )
+
+ # Act: delete all nodes and rels where `lastupdated` != UPDATE_TAG_T4.
+ job_parameters = {'UPDATE_TAG': UPDATE_TAG_T4}
+ run_cleanup_job(SAMPLE_JOB_FILENAME, neo4j_session, job_parameters)
+
+ # Assert: There are no nodes of label :TypeA in the graph, as the job iteratively removed them.
+ nodes = neo4j_session.run(
+ """
+ MATCH (n:TypeA) RETURN n.id
+ """,
+ )
+ actual_nodes = {n['n.id'] for n in nodes}
+ expected_nodes = set()
+ assert actual_nodes == expected_nodes
+ mock_read_text.assert_called_once()
diff --git a/tests/unit/cartography/data/test_graphjob.py b/tests/unit/cartography/data/test_graphjob.py
new file mode 100644
index 0000000..4618729
--- /dev/null
+++ b/tests/unit/cartography/data/test_graphjob.py
@@ -0,0 +1,12 @@
+from cartography.graph.job import GraphJob
+from tests.data.jobs.sample import SAMPLE_CLEANUP_JOB
+
+
+def test_graphjob_from_json():
+ # Act
+ job: GraphJob = GraphJob.from_json(SAMPLE_CLEANUP_JOB)
+
+ # Assert that the job was created from json string contents correctly
+ assert job.name == "cleanup stale resources"
+ assert len(job.statements) == 3
+ assert job.short_name is None
|
Failure during the cleanup job of EC2 snapshots
I've experienced a new failure during the cleanup job of EC2 snapshots:
```
neobolt.exceptions.ClientError: Unable to load NODE with id <redacted>.
```
Full log:
```
ERROR:cartography.graph.job:Unhandled error while executing statement in job 'cleanup EBS Snapshots': Unable to load NODE with id <redacted>.
ERROR:cartography.sync:Unhandled exception during sync stage 'aws'
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/cartography/sync.py", line 73, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.9/site-packages/cartography/util.py", line 66, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/__init__.py", line 202, in start_aws_ingestion
_sync_multiple_accounts(
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/__init__.py", line 139, in _sync_multiple_accounts
_sync_one_account(
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/__init__.py", line 57, in _sync_one_account
RESOURCE_FUNCTIONS[func_name](**sync_args)
File "/usr/local/lib/python3.9/site-packages/cartography/util.py", line 66, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/ec2/snapshots.py", line 117, in sync_ebs_snapshots
cleanup_snapshots(neo4j_session, common_job_parameters)
File "/usr/local/lib/python3.9/site-packages/cartography/util.py", line 66, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/ec2/snapshots.py", line 99, in cleanup_snapshots
run_cleanup_job(
File "/usr/local/lib/python3.9/site-packages/cartography/util.py", line 32, in run_cleanup_job
GraphJob.run_from_json(
File "/usr/local/lib/python3.9/site-packages/cartography/graph/job.py", line 95, in run_from_json
job.run(neo4j_session)
File "/usr/local/lib/python3.9/site-packages/cartography/graph/job.py", line 45, in run
stm.run(neo4j_session)
File "/usr/local/lib/python3.9/site-packages/cartography/graph/statement.py", line 48, in run
tx: neo4j.Transaction = session.begin_transaction()
File "/usr/local/lib/python3.9/site-packages/neo4j/__init__.py", line 614, in begin_transaction
self._open_transaction(metadata=metadata, timeout=timeout)
File "/usr/local/lib/python3.9/site-packages/neo4j/__init__.py", line 619, in _open_transaction
self._connect(access_mode)
File "/usr/local/lib/python3.9/site-packages/neo4j/__init__.py", line 381, in _connect
self._connection.sync()
File "/usr/local/lib/python3.9/site-packages/neobolt/direct.py", line 527, in sync
detail_delta, summary_delta = self.fetch()
File "/usr/local/lib/python3.9/site-packages/neobolt/direct.py", line 419, in fetch
return self._fetch()
File "/usr/local/lib/python3.9/site-packages/neobolt/direct.py", line 461, in _fetch
response.on_failure(summary_metadata or {})
File "/usr/local/lib/python3.9/site-packages/neobolt/direct.py", line 755, in on_failure
raise CypherError.hydrate(**metadata)
neobolt.exceptions.ClientError: Unable to load NODE with id <redacted>.
Traceback (most recent call last):
File "/usr/local/bin/cartography", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.9/site-packages/cartography/cli.py", line 441, in main
return CLI(default_sync, prog='cartography').main(argv)
File "/usr/local/lib/python3.9/site-packages/cartography/cli.py", line 421, in main
return cartography.sync.run_with_config(self.sync, config)
File "/usr/local/lib/python3.9/site-packages/cartography/sync.py", line 150, in run_with_config
return sync.run(neo4j_driver, config)
File "/usr/local/lib/python3.9/site-packages/cartography/sync.py", line 73, in run
stage_func(neo4j_session, config)
File "/usr/local/lib/python3.9/site-packages/cartography/util.py", line 66, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/__init__.py", line 202, in start_aws_ingestion
_sync_multiple_accounts(
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/__init__.py", line 139, in _sync_multiple_accounts
_sync_one_account(
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/__init__.py", line 57, in _sync_one_account
RESOURCE_FUNCTIONS[func_name](**sync_args)
File "/usr/local/lib/python3.9/site-packages/cartography/util.py", line 66, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/ec2/snapshots.py", line 117, in sync_ebs_snapshots
cleanup_snapshots(neo4j_session, common_job_parameters)
File "/usr/local/lib/python3.9/site-packages/cartography/util.py", line 66, in timed
return method(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/cartography/intel/aws/ec2/snapshots.py", line 99, in cleanup_snapshots
run_cleanup_job(
File "/usr/local/lib/python3.9/site-packages/cartography/util.py", line 32, in run_cleanup_job
GraphJob.run_from_json(
File "/usr/local/lib/python3.9/site-packages/cartography/graph/job.py", line 95, in run_from_json
job.run(neo4j_session)
File "/usr/local/lib/python3.9/site-packages/cartography/graph/job.py", line 45, in run
stm.run(neo4j_session)
File "/usr/local/lib/python3.9/site-packages/cartography/graph/statement.py", line 48, in run
tx: neo4j.Transaction = session.begin_transaction()
File "/usr/local/lib/python3.9/site-packages/neo4j/__init__.py", line 614, in begin_transaction
self._open_transaction(metadata=metadata, timeout=timeout)
File "/usr/local/lib/python3.9/site-packages/neo4j/__init__.py", line 619, in _open_transaction
self._connect(access_mode)
File "/usr/local/lib/python3.9/site-packages/neo4j/__init__.py", line 381, in _connect
self._connection.sync()
File "/usr/local/lib/python3.9/site-packages/neobolt/direct.py", line 527, in sync
detail_delta, summary_delta = self.fetch()
File "/usr/local/lib/python3.9/site-packages/neobolt/direct.py", line 419, in fetch
return self._fetch()
File "/usr/local/lib/python3.9/site-packages/neobolt/direct.py", line 461, in _fetch
response.on_failure(summary_metadata or {})
File "/usr/local/lib/python3.9/site-packages/neobolt/direct.py", line 755, in on_failure
raise CypherError.hydrate(**metadata)
neobolt.exceptions.ClientError: Unable to load NODE with id <redacted>.
```
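The merged patch addresses this by replacing the explicitly managed `session.begin_transaction()` flow with the driver's `write_transaction` API. A minimal sketch of that pattern, assuming the neo4j 1.x driver shown in the traceback, where the work is wrapped in a transaction function that the driver opens, commits, and may retry on transient errors:
```python
def run_statement(session, query, parameters):
    # Managed transaction: the driver controls begin/commit and may
    # re-invoke `work` on transient failures, so all statement logic
    # must live inside the transaction function.
    def work(tx):
        result = tx.run(query, parameters)
        return result.consume()  # drain network buffers before commit

    return session.write_transaction(work)
```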
|
0.0
|
694d6f572763ab22abf141e5b3480dfb05368757
|
[
"tests/unit/cartography/data/test_graphjob.py::test_graphjob_from_json"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-01 05:04:09+00:00
|
apache-2.0
| 3,655 |
|
lzakharov__csv2md-12
|
diff --git a/LICENSE b/LICENSE
index e18fb38..34d0931 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2018 Lev Zakharov
+Copyright (c) 2023 Lev Zakharov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 69f12a4..95b3836 100644
--- a/README.md
+++ b/README.md
@@ -87,26 +87,24 @@ You can also specify delimiter, quotation characters and alignment (see [Help](h
To view help run `csv2md -h`:
```commandline
-usage: csv2md [-h] [-d DELIMITER] [-q QUOTECHAR]
- [-c [CENTER_ALIGNED_COLUMNS [CENTER_ALIGNED_COLUMNS ...]]]
- [-r [RIGHT_ALIGNED_COLUMNS [RIGHT_ALIGNED_COLUMNS ...]]]
- [CSV_FILE [CSV_FILE ...]]
+usage: csv2md [-h] [-d DELIMITER] [-q QUOTECHAR] [-c [CENTER_ALIGNED_COLUMNS ...]] [-r [RIGHT_ALIGNED_COLUMNS ...]] [-H] [CSV_FILE ...]
Parse CSV files into Markdown tables.
positional arguments:
CSV_FILE One or more CSV files to parse
-optional arguments:
+options:
-h, --help show this help message and exit
-d DELIMITER, --delimiter DELIMITER
delimiter character. Default is ','
-q QUOTECHAR, --quotechar QUOTECHAR
quotation character. Default is '"'
- -c [CENTER_ALIGNED_COLUMNS [CENTER_ALIGNED_COLUMNS ...]], --center-aligned-columns [CENTER_ALIGNED_COLUMNS [CENTER_ALIGNED_COLUMNS ...]]
+ -c [CENTER_ALIGNED_COLUMNS ...], --center-aligned-columns [CENTER_ALIGNED_COLUMNS ...]
column numbers with center alignment (from zero)
- -r [RIGHT_ALIGNED_COLUMNS [RIGHT_ALIGNED_COLUMNS ...]], --right-aligned-columns [RIGHT_ALIGNED_COLUMNS [RIGHT_ALIGNED_COLUMNS ...]]
+ -r [RIGHT_ALIGNED_COLUMNS ...], --right-aligned-columns [RIGHT_ALIGNED_COLUMNS ...]
column numbers with right alignment (from zero)
+ -H, --no-header-row specify that the input CSV file has no header row. Will create default headers in Excel format (a,b,c,...)
```
## Running Tests
@@ -126,4 +124,4 @@ Feel free to also ask questions on the tracker.
## License
-Copyright (c) 2018 Lev Zakharov. Licensed under [the MIT License](https://raw.githubusercontent.com/lzakharov/csv2md/master/LICENSE).
+Copyright (c) 2023 Lev Zakharov. Licensed under [the MIT License](https://raw.githubusercontent.com/lzakharov/csv2md/master/LICENSE).
diff --git a/csv2md/__main__.py b/csv2md/__main__.py
index 76a7d45..2f23966 100644
--- a/csv2md/__main__.py
+++ b/csv2md/__main__.py
@@ -16,16 +16,18 @@ def main():
type=int, default=[], help='column numbers with center alignment (from zero)')
parser.add_argument('-r', '--right-aligned-columns', metavar='RIGHT_ALIGNED_COLUMNS', nargs='*',
type=int, default=[], help='column numbers with right alignment (from zero)')
+ parser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
+ help='specify that the input CSV file has no header row. Will create default headers in Excel format (a,b,c,...)')
args = parser.parse_args()
if not args.files:
table = Table.parse_csv(sys.stdin, args.delimiter, args.quotechar)
- print(table.markdown(args.center_aligned_columns, args.right_aligned_columns))
+ print(table.markdown(args.center_aligned_columns, args.right_aligned_columns, args.no_header_row))
return
for file in args.files:
table = Table.parse_csv(file, args.delimiter, args.quotechar)
- print(table.markdown(args.center_aligned_columns, args.right_aligned_columns))
+ print(table.markdown(args.center_aligned_columns, args.right_aligned_columns, args.no_header_row))
if __name__ == '__main__':
diff --git a/csv2md/table.py b/csv2md/table.py
index 971bf60..4452cfc 100644
--- a/csv2md/table.py
+++ b/csv2md/table.py
@@ -1,16 +1,24 @@
import csv
+from .utils import column_letter
+
class Table:
def __init__(self, cells):
self.cells = cells
self.widths = list(map(max, zip(*[list(map(len, row)) for row in cells])))
- def markdown(self, center_aligned_columns=None, right_aligned_columns=None):
+ def markdown(self, center_aligned_columns=None, right_aligned_columns=None, no_header_row=False):
+ if len(self.cells) == 0:
+ return ''
+
+ def ljust_row(row):
+ return [cell.ljust(width) for cell, width in zip(row, self.widths)]
+
def format_row(row):
return '| ' + ' | '.join(row) + ' |'
- rows = [format_row([cell.ljust(width) for cell, width in zip(row, self.widths)]) for row in self.cells]
+ rows = [format_row(ljust_row(row)) for row in self.cells]
separators = ['-' * width for width in self.widths]
if right_aligned_columns is not None:
@@ -20,6 +28,10 @@ class Table:
for column in center_aligned_columns:
separators[column] = ':' + ('-' * (self.widths[column] - 2)) + ':'
+ if no_header_row:
+ width = len(self.cells[0])
+ rows.insert(0, format_row(ljust_row(self.make_default_headers(width))))
+
rows.insert(1, format_row(separators))
return '\n'.join(rows)
@@ -27,3 +39,7 @@ class Table:
@staticmethod
def parse_csv(file, delimiter=',', quotechar='"'):
return Table(list(csv.reader(file, delimiter=delimiter, quotechar=quotechar)))
+
+ @staticmethod
+ def make_default_headers(n):
+ return tuple(map(column_letter, range(n)))
diff --git a/csv2md/utils.py b/csv2md/utils.py
new file mode 100644
index 0000000..a50b0a3
--- /dev/null
+++ b/csv2md/utils.py
@@ -0,0 +1,7 @@
+import string
+
+def column_letter(index):
+ """Returns the column letter in Excel format."""
+ letters = string.ascii_lowercase
+ count = len(letters)
+ return letters[index % count] * ((index // count) + 1)
diff --git a/setup.py b/setup.py
index 1587e8c..2da19e6 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ with open('LICENSE') as f:
setup(
name='csv2md',
- version='1.1.2',
+ version='1.2.0',
description='Command line tool for converting CSV files into Markdown tables.',
long_description=readme,
author='Lev Zakharov',
|
lzakharov/csv2md
|
9eb4e07444c8d269efd74e4408e11ac99a7fcd07
|
diff --git a/csv2md/test_table.py b/csv2md/test_table.py
index 496f9f5..a6128cf 100644
--- a/csv2md/test_table.py
+++ b/csv2md/test_table.py
@@ -42,8 +42,23 @@ normal_md_with_alignment = (
'| 1996 | Jeep | Grand Cherokee | MUST SELL! air, moon roof, loaded | 4799.00 |'
)
+normal_md_with_default_columns = (
+ '| a | b | c | d | e |\n'
+ '| ---- | ----- | -------------------------- | --------------------------------- | ------- |\n'
+ '| year | make | model | description | price |\n'
+ '| 1997 | Ford | E350 | ac, abs, moon | 3000.00 |\n'
+ '| 1999 | Chevy | Venture «Extended Edition» | | 4900.00 |\n'
+ '| 1996 | Jeep | Grand Cherokee | MUST SELL! air, moon roof, loaded | 4799.00 |'
+)
+
class TestTable(TestCase):
+ def test_markdown_empty_table(self):
+ expected = ''
+ table = Table([])
+ actual = table.markdown()
+ self.assertEqual(expected, actual)
+
def test_markdown(self):
expected = normal_md
table = Table(normal_cells)
@@ -56,6 +71,12 @@ class TestTable(TestCase):
actual = table.markdown([1, 2], [4])
self.assertEqual(expected, actual)
+ def test_markdown_with_default_columns(self):
+ expected = normal_md_with_default_columns
+ table = Table(normal_cells)
+ actual = table.markdown(no_header_row=True)
+ self.assertEqual(expected, actual)
+
def test_parse_csv(self):
expected_cells = normal_cells
expected_widths = normal_widths
@@ -70,3 +91,8 @@ class TestTable(TestCase):
self.assertEqual(expected_cells, actual.cells)
self.assertEqual(expected_widths, actual.widths)
+ def test_make_default_headers(self):
+ expected = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
+ 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
+ 'y', 'z', 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg')
+ self.assertEqual(Table.make_default_headers(33), expected)
diff --git a/csv2md/test_utils.py b/csv2md/test_utils.py
new file mode 100644
index 0000000..4b9b141
--- /dev/null
+++ b/csv2md/test_utils.py
@@ -0,0 +1,10 @@
+from unittest import TestCase
+
+from .utils import column_letter
+
+
+class TestUtils(TestCase):
+ def test_column_letter(self):
+ self.assertEqual(column_letter(0), 'a')
+ self.assertEqual(column_letter(4), 'e')
+ self.assertEqual(column_letter(30), 'ee')
|
support CSV with no header?
[csvkit](https://csvkit.readthedocs.io/en/latest/) supports a [common CLI flag](https://csvkit.readthedocs.io/en/latest/common_arguments.html) across all of its tools for inputting CSV files that have no header row
```
-H, --no-header-row Specify that the input CSV file has no header row.
Will create default headers (a,b,c,...).
```
It would be helpful if `csv2md` supported the same or a similar flag.
This would allow quickly creating CSVs with jq's `@csv` filter and piping them into `csv2md`.
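For reference, a standalone sketch of the Excel-style default-header scheme the patch above implements in `csv2md/utils.py` (a..z, then the letter repeated: aa, bb, ...):
```python
import string

def column_letter(index):
    """Excel-like default header: 'a'..'z', then 'aa', 'bb', 'cc', ..."""
    letters = string.ascii_lowercase
    count = len(letters)
    # Repeat the letter once more for every full pass through the alphabet
    return letters[index % count] * ((index // count) + 1)

print([column_letter(i) for i in (0, 4, 25, 26, 30)])  # ['a', 'e', 'z', 'aa', 'ee']
```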
|
0.0
|
9eb4e07444c8d269efd74e4408e11ac99a7fcd07
|
[
"csv2md/test_table.py::TestTable::test_make_default_headers",
"csv2md/test_table.py::TestTable::test_markdown",
"csv2md/test_table.py::TestTable::test_markdown_empty_table",
"csv2md/test_table.py::TestTable::test_markdown_with_alignment",
"csv2md/test_table.py::TestTable::test_markdown_with_default_columns",
"csv2md/test_table.py::TestTable::test_parse_csv",
"csv2md/test_table.py::TestTable::test_parse_csv_with_custom_delimiter",
"csv2md/test_utils.py::TestUtils::test_column_letter"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-06 10:10:37+00:00
|
mit
| 3,656 |
|
lzakharov__csv2md-16
|
diff --git a/csv2md/__main__.py b/csv2md/__main__.py
index 2f23966..5147845 100644
--- a/csv2md/__main__.py
+++ b/csv2md/__main__.py
@@ -2,16 +2,20 @@ import argparse
import sys
from .table import Table
+from .exceptions import BaseError, ColumnIdentifierError
def main():
- parser = argparse.ArgumentParser(description='Parse CSV files into Markdown tables.')
+ parser = argparse.ArgumentParser(
+ description='Parse CSV files into Markdown tables.')
parser.add_argument('files', metavar='CSV_FILE', type=argparse.FileType('r'), nargs='*',
help='One or more CSV files to parse')
parser.add_argument('-d', '--delimiter', metavar='DELIMITER', type=str, default=',',
help='delimiter character. Default is \',\'')
parser.add_argument('-q', '--quotechar', metavar='QUOTECHAR', type=str, default='"',
help='quotation character. Default is \'"\'')
+ parser.add_argument('-C', '--columns', dest='columns', type=str, default=None,
+ help='comma-separated list of column indices or ranges (from zero) to be processed, e.g. "0,3-5,7". Indices out of range will be ignored')
parser.add_argument('-c', '--center-aligned-columns', metavar='CENTER_ALIGNED_COLUMNS', nargs='*',
type=int, default=[], help='column numbers with center alignment (from zero)')
parser.add_argument('-r', '--right-aligned-columns', metavar='RIGHT_ALIGNED_COLUMNS', nargs='*',
@@ -20,15 +24,39 @@ def main():
help='specify that the input CSV file has no header row. Will create default headers in Excel format (a,b,c,...)')
args = parser.parse_args()
- if not args.files:
- table = Table.parse_csv(sys.stdin, args.delimiter, args.quotechar)
- print(table.markdown(args.center_aligned_columns, args.right_aligned_columns, args.no_header_row))
- return
+ try:
+ columns = parse_columns(args.columns)
+ except BaseError as e:
+ parser.error(e)
- for file in args.files:
- table = Table.parse_csv(file, args.delimiter, args.quotechar)
+ for file in [sys.stdin] if not args.files else args.files:
+ table = Table.parse_csv(file, args.delimiter, args.quotechar, columns)
print(table.markdown(args.center_aligned_columns, args.right_aligned_columns, args.no_header_row))
+def parse_columns(columns):
+ if not columns:
+ return None
+
+ result = []
+
+ for c in columns.split(','):
+ if '-' in c:
+ try:
+ a, b = map(int, c.split('-', 1))
+ except ValueError:
+ raise ColumnIdentifierError(c)
+
+ result.extend(range(a, b + 1))
+ else:
+ if not c.isdecimal():
+ raise ColumnIdentifierError(c)
+
+ column = int(c)
+ result.append(column)
+
+ return result
+
+
if __name__ == '__main__':
main()
diff --git a/csv2md/exceptions.py b/csv2md/exceptions.py
new file mode 100644
index 0000000..1fa4de4
--- /dev/null
+++ b/csv2md/exceptions.py
@@ -0,0 +1,8 @@
+class BaseError(Exception):
+ pass
+
+
+class ColumnIdentifierError(BaseError):
+ def __init__(self, column):
+ msg = f'Invalid column identifier "{column}". Must be non-negative integer or range of non-negative integers separated by "-".'
+ super().__init__(msg)
diff --git a/csv2md/table.py b/csv2md/table.py
index 4452cfc..36a9ae1 100644
--- a/csv2md/table.py
+++ b/csv2md/table.py
@@ -37,8 +37,15 @@ class Table:
return '\n'.join(rows)
@staticmethod
- def parse_csv(file, delimiter=',', quotechar='"'):
- return Table(list(csv.reader(file, delimiter=delimiter, quotechar=quotechar)))
+ def parse_csv(file, delimiter=',', quotechar='"', columns=None):
+ reader = csv.reader(file, delimiter=delimiter, quotechar=quotechar)
+
+ if columns is None:
+ cells = list(reader)
+ else:
+ cells = [[row[i] for i in columns if 0 <= i < len(row)] for row in reader]
+
+ return Table(cells)
@staticmethod
def make_default_headers(n):
|
lzakharov/csv2md
|
59091d2e28a09240a0b4d7a265b043526e5a3c63
|
diff --git a/csv2md/test_table.py b/csv2md/test_table.py
index a6128cf..0ca823e 100644
--- a/csv2md/test_table.py
+++ b/csv2md/test_table.py
@@ -26,6 +26,15 @@ normal_cells = [
normal_widths = [4, 5, 26, 33, 7]
+filtered_columns_cells = [
+ ['year', 'model', 'description'],
+ ['1997', 'E350', 'ac, abs, moon'],
+ ['1999', 'Venture «Extended Edition»', ''],
+ ['1996', 'Grand Cherokee', 'MUST SELL! air, moon roof, loaded']
+]
+
+filtered_columns_widths = [4, 26, 33]
+
normal_md = (
'| year | make | model | description | price |\n'
'| ---- | ----- | -------------------------- | --------------------------------- | ------- |\n'
@@ -91,6 +100,20 @@ class TestTable(TestCase):
self.assertEqual(expected_cells, actual.cells)
self.assertEqual(expected_widths, actual.widths)
+ def test_parse_csv_with_columns(self):
+ expected_cells = filtered_columns_cells
+ expected_widths = filtered_columns_widths
+ actual = Table.parse_csv(io.StringIO(normal_csv), columns=[0, 2, 3])
+ self.assertEqual(expected_cells, actual.cells)
+ self.assertEqual(expected_widths, actual.widths)
+
+ def test_parse_csv_with_invalid_columns(self):
+ expected_cells = filtered_columns_cells
+ expected_widths = filtered_columns_widths
+ actual = Table.parse_csv(io.StringIO(normal_csv), columns=[-10, -1, 0, 2, 3, 5, 10, 100])
+ self.assertEqual(expected_cells, actual.cells)
+ self.assertEqual(expected_widths, actual.widths)
+
def test_make_default_headers(self):
expected = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
|
Ability to filter columns
It would be great if there were an easy way to provide a list of columns to be used when converting to Markdown.
So, for example, with a table with 10 columns, doing something like
--use-only=1-3,4,5,7
would leave columns 6, 8, 9 and 10 out of the generated table.
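A minimal sketch of parsing such a spec into zero-based column indices (the patch above ships a hardened version that raises `ColumnIdentifierError` on bad input):
```python
def parse_columns(spec):
    """Parse a spec like '0,3-5,7' into column indices; ranges are inclusive."""
    result = []
    for part in spec.split(','):
        if '-' in part:
            start, end = map(int, part.split('-', 1))
            result.extend(range(start, end + 1))
        else:
            result.append(int(part))
    return result

print(parse_columns('0,3-5,7'))  # [0, 3, 4, 5, 7]
```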
|
0.0
|
59091d2e28a09240a0b4d7a265b043526e5a3c63
|
[
"csv2md/test_table.py::TestTable::test_parse_csv_with_columns",
"csv2md/test_table.py::TestTable::test_parse_csv_with_invalid_columns"
] |
[
"csv2md/test_table.py::TestTable::test_make_default_headers",
"csv2md/test_table.py::TestTable::test_markdown",
"csv2md/test_table.py::TestTable::test_markdown_empty_table",
"csv2md/test_table.py::TestTable::test_markdown_with_alignment",
"csv2md/test_table.py::TestTable::test_markdown_with_default_columns",
"csv2md/test_table.py::TestTable::test_parse_csv",
"csv2md/test_table.py::TestTable::test_parse_csv_with_custom_delimiter"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-06 15:05:06+00:00
|
mit
| 3,657 |
|
m-labs__nmigen-50
|
diff --git a/nmigen/hdl/rec.py b/nmigen/hdl/rec.py
index 999c85b..f1ddbd2 100644
--- a/nmigen/hdl/rec.py
+++ b/nmigen/hdl/rec.py
@@ -72,7 +72,7 @@ class Record(Value):
def concat(a, b):
if a is None:
return b
- return "{}_{}".format(a, b)
+ return "{}__{}".format(a, b)
self.layout = Layout.wrap(layout)
self.fields = OrderedDict()
|
m-labs/nmigen
|
81ee2db1636a504d2e60fc4649db4afde8b27e4c
|
diff --git a/nmigen/test/test_hdl_rec.py b/nmigen/test/test_hdl_rec.py
index 3f68247..65e8bf6 100644
--- a/nmigen/test/test_hdl_rec.py
+++ b/nmigen/test/test_hdl_rec.py
@@ -65,11 +65,11 @@ class RecordTestCase(FHDLTestCase):
])
])
- self.assertEqual(repr(r), "(rec r stb data (rec r_info a b))")
+ self.assertEqual(repr(r), "(rec r stb data (rec r__info a b))")
self.assertEqual(len(r), 35)
self.assertIsInstance(r.stb, Signal)
- self.assertEqual(r.stb.name, "r_stb")
- self.assertEqual(r["stb"].name, "r_stb")
+ self.assertEqual(r.stb.name, "r__stb")
+ self.assertEqual(r["stb"].name, "r__stb")
def test_unnamed(self):
r = [Record([
|
separator of signal names in records causes ambiguity
When creating a `Record`, the signals inside it are currently named `recordname_fieldname`.
This makes it hard to see the hierarchy when snake-case names are used for records and fields alike. E.g., a record named `some_record` with a field named `some_field` produces a signal called `some_record_some_field`, which could equally be the name of a signal in a nested record of depth 3.
I propose using another character for separation (for example `.`), which would lead to `some_record.some_field` in that case.
(see: https://github.com/m-labs/nmigen/blob/master/nmigen/hdl/rec.py#L75)
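A standalone sketch (plain Python, not nMigen code) of the collision; note that the patch above settles on `__` rather than `.` as the separator:
```python
def concat(a, b, sep="_"):
    # Join two hierarchy levels the way Record builds signal names
    return f"{a}{sep}{b}"

# Record 'some' -> nested record 'record' -> field 'some_field' ...
nested = concat(concat("some", "record"), "some_field")
# ... collides with record 'some_record' -> field 'some_field':
flat = concat("some_record", "some_field")
assert nested == flat == "some_record_some_field"

# With a double-underscore separator the two hierarchies stay distinct:
assert concat(concat("some", "record", "__"), "some_field", "__") != \
       concat("some_record", "some_field", "__")
```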
|
0.0
|
81ee2db1636a504d2e60fc4649db4afde8b27e4c
|
[
"nmigen/test/test_hdl_rec.py::RecordTestCase::test_basic"
] |
[
"nmigen/test/test_hdl_rec.py::LayoutTestCase::test_fields",
"nmigen/test/test_hdl_rec.py::LayoutTestCase::test_wrong_direction",
"nmigen/test/test_hdl_rec.py::LayoutTestCase::test_wrong_field",
"nmigen/test/test_hdl_rec.py::LayoutTestCase::test_wrong_name",
"nmigen/test/test_hdl_rec.py::LayoutTestCase::test_wrong_name_duplicate",
"nmigen/test/test_hdl_rec.py::LayoutTestCase::test_wrong_shape",
"nmigen/test/test_hdl_rec.py::RecordTestCase::test_unnamed",
"nmigen/test/test_hdl_rec.py::RecordTestCase::test_wrong_field",
"nmigen/test/test_hdl_rec.py::RecordTestCase::test_wrong_field_unnamed"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-03-25 13:56:04+00:00
|
bsd-2-clause
| 3,658 |
|
m-lundberg__simple-pid-4
|
diff --git a/README.md b/README.md
index c564b5a..f03e739 100644
--- a/README.md
+++ b/README.md
@@ -62,6 +62,18 @@ To disable the PID so that no new values are computed, set auto mode to False:
pid.auto_mode = False # no new values will be computed when pid is called
pid.auto_mode = True # pid is enabled again
```
+When disabling the PID and controlling a system manually, it might be useful to tell the PID controller where to start from when giving back control to it. This can be done by enabling auto mode like this:
+```python
+pid.set_auto_mode(True, last_output=8.0)
+```
+This will set the I-term to the value given to `last_output`, meaning that if the system that is being controlled was stable at that output value the PID will keep the system stable if started from that point, without any big bumps in the output when turning the PID back on.
+
+When disabling the PID and controlling a system manually, it might be useful to tell the PID controller where to start from when giving back control to it. This can be done by enabling auto mode like this:
+```python
+pid.set_auto_mode(True, last_output=8.0)
+```
+This will set the I-term to the value given to `last_output`, meaning that if the system that is being controlled was stable at that output value the PID will keep the system stable if started from that point, without any big bumps in the output when turning the PID back on.
+
In order to get output values in a certain range, and also to avoid [integral windup](https://en.wikipedia.org/wiki/Integral_windup) (since the integral term will never be allowed to grow outside of these limits), the output can be limited to a range:
```python
@@ -69,6 +81,11 @@ pid.output_limits = (0, 10) # output value will be between 0 and 10
pid.output_limits = (0, None) # output will always be above 0, but with no upper bound
```
+When tuning the PID, it can be useful to see how each of the components contribute to the output. They can be seen like this:
+```python
+p, i, d = pid.components # the separate terms are now in p, i, d
+```
+
To eliminate overshoot in certain types of systems, you can calculate the [proportional term directly on the measurement](http://brettbeauregard.com/blog/2017/06/introducing-proportional-on-measurement/) instead of the error. This can be enabled like this:
```python
pid.proportional_on_measurement = True
diff --git a/simple_pid/PID.py b/simple_pid/PID.py
index e71ccc6..39dafdd 100644
--- a/simple_pid/PID.py
+++ b/simple_pid/PID.py
@@ -144,7 +144,27 @@ class PID(object):
# switching from manual mode to auto, reset
self._last_output = None
self._last_input = None
- self._error_sum = 0
+ self._last_time = _current_time()
+ self._error_sum = _clamp(0, self.output_limits)
+
+ self._auto_mode = enabled
+
+ def set_auto_mode(self, enabled, last_output=None):
+ """
+ Enable or disable the PID controller, optionally setting the last output value.
+ This is useful if some system has been manually controlled and if the PID should take over.
+ In that case, pass the last output variable (the control variable) and it will be set as the starting
+ I-term when the PID is set to auto mode.
+ :param enabled: Whether auto mode should be enabled, True or False
+ :param last_output: The last output, or the control variable, that the PID should start from
+ when going from manual mode to auto mode
+ """
+ if enabled and not self._auto_mode:
+ # switching from manual mode to auto, reset
+ self._last_output = last_output
+ self._last_input = None
+ self._last_time = _current_time()
+ self._error_sum = (last_output if last_output is not None else 0)
self._error_sum = _clamp(self._error_sum, self.output_limits)
self._auto_mode = enabled
|
m-lundberg/simple-pid
|
45c3a197901f8fd6ebdc606bbdf18be9b8fd8a02
|
diff --git a/tests/test_pid.py b/tests/test_pid.py
index 9c1d1f0..4ffa89a 100644
--- a/tests/test_pid.py
+++ b/tests/test_pid.py
@@ -133,6 +133,18 @@ def test_auto_mode():
assert pid._error_sum == 0
assert pid(8) == 2
+ # last update time should be reset to avoid huge dt
+ from simple_pid.PID import _current_time
+ pid.auto_mode = False
+ time.sleep(1)
+ pid.auto_mode = True
+ assert _current_time() - pid._last_time < 0.01
+
+ # check that setting last_output works
+ pid.auto_mode = False
+ pid.set_auto_mode(True, last_output=10)
+ assert pid._error_sum == 10
+
def test_separate_components():
pid = PID(1, 0, 1, setpoint=10, sample_time=0.1)
|
Auto_Mode toggling causes disturbance
Hi,
Firstly, great tool, thank you!
I have been working on implementing the controller and I think I've come across an unintended bug in the code when it comes to toggling "auto_mode" off and on again.
The issue arises for two reasons:
1. `self._last_time` is not updated during calls that take place while auto_mode is off, leading to a potentially huge dt term once auto_mode is re-enabled.
2. `self._error_sum` is reset to 0 when auto_mode is re-enabled. I think it should instead be reset to whatever the last output was set to (possibly in manual mode), so that control begins again as though it had never stopped. Let's say in manual mode I've changed my output and my input (PV) has settled at a new value. If I re-enable auto_mode having changed the setpoint to match my stable input value, then my derivative and proportional terms will be zero (proportional because error = 0, derivative because d_input is reset to 0 via _last_input being reset to None).
In order for the output to hold (the intended behaviour, presumably), the error_sum term must be set to my previous output value, thus mimicking the state of a controller that had been responsible for the new stable input value. (Apologies if that isn't clear.)
I've implemented the above changes and have successfully removed the "kick" that I was seeing upon re-enabling auto_mode. Let me know your thoughts.
Best,
James
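A usage sketch of the resulting behaviour (values illustrative; `set_auto_mode` is the method added by the patch above):
```python
from simple_pid import PID

pid = PID(1, 0.1, 0.05, setpoint=10, output_limits=(0, 20))
pid(8)                 # normal automatic operation

pid.auto_mode = False  # operator takes over
manual_output = 8.0    # system settles at this output under manual control

# Hand control back without a kick: the I-term starts from the last manual
# output and the internal last-update time is reset, so dt stays sane.
pid.set_auto_mode(True, last_output=manual_output)
pid(10)
```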
|
0.0
|
45c3a197901f8fd6ebdc606bbdf18be9b8fd8a02
|
[
"tests/test_pid.py::test_auto_mode"
] |
[
"tests/test_pid.py::test_zero",
"tests/test_pid.py::test_P",
"tests/test_pid.py::test_P_negative_setpoint",
"tests/test_pid.py::test_I",
"tests/test_pid.py::test_I_negative_setpoint",
"tests/test_pid.py::test_D",
"tests/test_pid.py::test_D_negative_setpoint",
"tests/test_pid.py::test_desired_state",
"tests/test_pid.py::test_output_limits",
"tests/test_pid.py::test_sample_time",
"tests/test_pid.py::test_monotonic",
"tests/test_pid.py::test_separate_components",
"tests/test_pid.py::test_clamp",
"tests/test_pid.py::test_converge_system"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-19 22:37:51+00:00
|
mit
| 3,659 |
|
m-lundberg__simple-pid-41
|
diff --git a/README.md b/README.md
index 150d087..907aa03 100644
--- a/README.md
+++ b/README.md
@@ -99,6 +99,12 @@ To eliminate overshoot in certain types of systems, you can calculate the [propo
pid.proportional_on_measurement = True
```
+#### Differential on measurement
+By default the [differential term is calculated on the measurement](http://brettbeauregard.com/blog/2011/04/improving-the-beginner%e2%80%99s-pid-derivative-kick/) instead of the error. This can be disabled like this:
+```python
+pid.differential_on_measurement = False
+```
+
#### Error mapping
To transform the error value to another domain before doing any computations on it, you can supply an `error_map` callback function to the PID. The callback function should take one argument which is the error from the setpoint. This can be used e.g. to get a degree value error in a yaw angle control with values between [-pi, pi):
```python
diff --git a/simple_pid/pid.py b/simple_pid/pid.py
index db66c3f..ffcbfbf 100644
--- a/simple_pid/pid.py
+++ b/simple_pid/pid.py
@@ -35,6 +35,7 @@ class PID(object):
output_limits=(None, None),
auto_mode=True,
proportional_on_measurement=False,
+ differetial_on_measurement=True,
error_map=None,
):
"""
@@ -58,6 +59,8 @@ class PID(object):
:param proportional_on_measurement: Whether the proportional term should be calculated on
the input directly rather than on the error (which is the traditional way). Using
proportional-on-measurement avoids overshoot for some types of systems.
+ :param differetial_on_measurement: Whether the differential term should be calculated on
+ the input directly rather than on the error (which is the traditional way).
:param error_map: Function to transform the error value in another constrained value.
"""
self.Kp, self.Ki, self.Kd = Kp, Ki, Kd
@@ -67,6 +70,7 @@ class PID(object):
self._min_output, self._max_output = None, None
self._auto_mode = auto_mode
self.proportional_on_measurement = proportional_on_measurement
+ self.differetial_on_measurement = differetial_on_measurement
self.error_map = error_map
self._proportional = 0
@@ -75,6 +79,7 @@ class PID(object):
self._last_time = None
self._last_output = None
+ self._last_error = None
self._last_input = None
self.output_limits = output_limits
@@ -107,6 +112,7 @@ class PID(object):
# Compute error terms
error = self.setpoint - input_
d_input = input_ - (self._last_input if (self._last_input is not None) else input_)
+ d_error = error - (self._last_error if (self._last_error is not None) else error)
# Check if must map the error
if self.error_map is not None:
@@ -124,7 +130,10 @@ class PID(object):
self._integral += self.Ki * error * dt
self._integral = _clamp(self._integral, self.output_limits) # Avoid integral windup
- self._derivative = -self.Kd * d_input / dt
+ if self.differetial_on_measurement:
+ self._derivative = -self.Kd * d_input / dt
+ else:
+ self._derivative = self.Kd * d_error / dt
# Compute final output
output = self._proportional + self._integral + self._derivative
@@ -133,6 +142,7 @@ class PID(object):
# Keep track of state
self._last_output = output
self._last_input = input_
+ self._last_error = error
self._last_time = now
return output
@@ -143,7 +153,8 @@ class PID(object):
'Kp={self.Kp!r}, Ki={self.Ki!r}, Kd={self.Kd!r}, '
'setpoint={self.setpoint!r}, sample_time={self.sample_time!r}, '
'output_limits={self.output_limits!r}, auto_mode={self.auto_mode!r}, '
- 'proportional_on_measurement={self.proportional_on_measurement!r},'
+ 'proportional_on_measurement={self.proportional_on_measurement!r}, '
+ 'differetial_on_measurement={self.differetial_on_measurement!r}, '
'error_map={self.error_map!r}'
')'
).format(self=self)
diff --git a/simple_pid/pid.pyi b/simple_pid/pid.pyi
index dfe451a..2df4f23 100644
--- a/simple_pid/pid.pyi
+++ b/simple_pid/pid.pyi
@@ -15,6 +15,7 @@ class PID(object):
setpoint: float
sample_time: Optional[float]
proportional_on_measurement: bool
+ differential_on_measurement: bool
error_map: Optional[Callable[[float], float]]
def __init__(
self,
@@ -26,6 +27,7 @@ class PID(object):
output_limits: _Limits = ...,
auto_mode: bool = ...,
proportional_on_measurement: bool = ...,
+ differential_on_measurement: bool = ...,
error_map: Optional[Callable[[float], float]] = ...,
) -> None: ...
def __call__(self, input_: float, dt: Optional[float] = ...) -> Optional[float]: ...
|
m-lundberg/simple-pid
|
7cd080473e582e4de29b49f9d002d2bd62ef33ca
|
diff --git a/tests/test_pid.py b/tests/test_pid.py
index 886347e..6364684 100644
--- a/tests/test_pid.py
+++ b/tests/test_pid.py
@@ -218,6 +218,27 @@ def test_converge_system():
assert abs(pv - 5) < 0.1
+def test_converge_diff_on_error():
+ pid = PID(1, 0.8, 0.04, setpoint=5, output_limits=(-5, 5), differetial_on_measurement=False)
+ pv = 0 # Process variable
+
+ def update_system(c, dt):
+ # Calculate a simple system model
+ return pv + c * dt - 1 * dt
+
+ start_time = time.time()
+ last_time = start_time
+
+ while time.time() - start_time < 12:
+ c = pid(pv)
+ pv = update_system(c, time.time() - last_time)
+
+ last_time = time.time()
+
+ # Check if system has converged
+ assert abs(pv - 5) < 0.1
+
+
def test_error_map():
import math
|
Derivative Term is calculated on error and not input
In the textbook formulation, the derivative term is calculated on the derivative of the error and not on the derivative of the input ([see here](https://en.wikipedia.org/wiki/PID_controller#Derivative_term)).
The code linked below should not contain something like `d_input` but rather a `d_error` term, which should be calculated somewhere.
https://github.com/m-lundberg/simple-pid/blob/15a030fc81f73dc6bfaf8c3084dcf49a415d7e4b/simple_pid/PID.py#L127
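A minimal sketch of the two D-term variants (naming mine; simple-pid computes the measurement-based form to avoid derivative kick on setpoint changes):
```python
def derivative_term(Kd, dt, error, last_error, input_, last_input,
                    on_measurement=True):
    """D term on the measurement (immune to setpoint steps) or on the error
    (the textbook form this issue asks for)."""
    if on_measurement:
        return -Kd * (input_ - last_input) / dt  # note the sign flip
    return Kd * (error - last_error) / dt

# On a setpoint step the measurement-based form stays quiet while the
# error-based form produces a kick:
print(derivative_term(0.05, 0.1, error=5.0, last_error=0.0,
                      input_=3.0, last_input=3.0))                        # -0.0
print(derivative_term(0.05, 0.1, error=5.0, last_error=0.0,
                      input_=3.0, last_input=3.0, on_measurement=False))  # 2.5
```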
|
0.0
|
7cd080473e582e4de29b49f9d002d2bd62ef33ca
|
[
"tests/test_pid.py::test_converge_diff_on_error"
] |
[
"tests/test_pid.py::test_zero",
"tests/test_pid.py::test_P",
"tests/test_pid.py::test_P_negative_setpoint",
"tests/test_pid.py::test_I",
"tests/test_pid.py::test_I_negative_setpoint",
"tests/test_pid.py::test_D",
"tests/test_pid.py::test_D_negative_setpoint",
"tests/test_pid.py::test_desired_state",
"tests/test_pid.py::test_output_limits",
"tests/test_pid.py::test_sample_time",
"tests/test_pid.py::test_monotonic",
"tests/test_pid.py::test_auto_mode",
"tests/test_pid.py::test_separate_components",
"tests/test_pid.py::test_clamp",
"tests/test_pid.py::test_repr",
"tests/test_pid.py::test_converge_system",
"tests/test_pid.py::test_error_map"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-18 08:42:19+00:00
|
mit
| 3,660 |
|
m0nhawk__grafana_api-39
|
diff --git a/Pipfile b/Pipfile
index f534687..2bc2f58 100644
--- a/Pipfile
+++ b/Pipfile
@@ -8,7 +8,7 @@ name = "grafana_api"
[dev-packages]
codecov = "~=2.0"
coverage = "~=4.5"
-mock = {version = "*", markers = "python_version <= '2.7'"}
+mock = {version = "*",markers = "python_version <= '2.7'"}
pylint = ">=1.9"
requests-mock = "~=1.6"
unittest-xml-reporting = "~=2.5"
diff --git a/grafana_api/grafana_api.py b/grafana_api/grafana_api.py
index 7c81439..e965768 100644
--- a/grafana_api/grafana_api.py
+++ b/grafana_api/grafana_api.py
@@ -94,25 +94,17 @@ class GrafanaAPI:
r = runner(
__url, json=json, headers=headers, auth=self.auth, verify=self.verify
)
- try:
-
- if 500 <= r.status_code < 600:
- raise GrafanaServerError(
- "Client Error {0}: {1}".format(r.status_code, r.json()['message'])
- )
- elif r.status_code == 400:
- raise GrafanaBadInputError("Bad Input: `{0}`".format(r.text))
- elif r.status_code == 401:
- raise GrafanaUnauthorizedError("Unauthorized")
- elif 400 <= r.status_code < 500:
- raise GrafanaClientError(
- "Client Error {0}: {1}".format(r.status_code, r.text)
- )
- return r.json()
-
- except Exception as error:
- print('Caught this error: ' + repr(error))
-
-
-
+ if 500 <= r.status_code < 600:
+ raise GrafanaServerError(
+ "Client Error {0}: {1}".format(r.status_code, r.json()['message'])
+ )
+ elif r.status_code == 400:
+ raise GrafanaBadInputError("Bad Input: `{0}`".format(r.text))
+ elif r.status_code == 401:
+ raise GrafanaUnauthorizedError("Unauthorized")
+ elif 400 <= r.status_code < 500:
+ raise GrafanaClientError(
+ "Client Error {0}: {1}".format(r.status_code, r.text)
+ )
+ return r.json()
return __request_runnner
|
m0nhawk/grafana_api
|
bfbe1dd6a4e90e271b036444942faf1ad6b70784
|
diff --git a/test/api/test_annotations.py b/test/api/test_annotations.py
index 3299748..3f278f7 100644
--- a/test/api/test_annotations.py
+++ b/test/api/test_annotations.py
@@ -99,29 +99,29 @@ class AnnotationsTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_delete_annotations_by_id_could_not_find(self, m):
m.delete("http://localhost/api/annotations/None", json={"message": "Could not find annotation to update"},status_code=500)
- response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
- self.assertRaises(GrafanaServerError)
+ with self.assertRaises(GrafanaServerError):
+ response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
@requests_mock.Mocker()
def test_delete_annotations_by_id_forbidden(self, m):
m.delete("http://localhost/api/annotations/None", json={"message": "Forbidden"},
status_code=403)
- response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
- self.assertRaises(GrafanaClientError)
+ with self.assertRaises(GrafanaClientError):
+ response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
@requests_mock.Mocker()
def test_delete_annotations_by_id_unauthorized(self, m):
m.delete("http://localhost/api/annotations/None", json={"message": "Unauthorized"},
status_code=401)
- response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
- self.assertRaises(GrafanaUnauthorizedError)
+ with self.assertRaises(GrafanaUnauthorizedError):
+ response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
@requests_mock.Mocker()
def test_delete_annotations_by_id_bad_input(self, m):
m.delete("http://localhost/api/annotations/None", json={"message": "Bad Input"},
status_code=400)
- response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
- self.assertRaises(GrafanaBadInputError)
+ with self.assertRaises(GrafanaBadInputError):
+ response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
@requests_mock.Mocker()
diff --git a/test/api/test_folder.py b/test/api/test_folder.py
index 66b5f44..319eeaa 100644
--- a/test/api/test_folder.py
+++ b/test/api/test_folder.py
@@ -92,8 +92,8 @@ class FolderTestCase(unittest.TestCase):
"message": "Folder title cannot be empty"
}, status_code=400
)
- folder = self.cli.folder.create_folder(title="Departmenet ABC")
- self.assertRaises(GrafanaBadInputError)
+ with self.assertRaises(GrafanaBadInputError):
+ folder = self.cli.folder.create_folder(title="Departmenet ABC")
@requests_mock.Mocker()
def test_update_folder(self, m):
diff --git a/test/api/test_search.py b/test/api/test_search.py
index 134696b..cffa33c 100644
--- a/test/api/test_search.py
+++ b/test/api/test_search.py
@@ -46,5 +46,5 @@ class AnnotationsTestCase(unittest.TestCase):
}, status_code=400
)
- result = self.cli.search.search_dashboards()
- self.assertRaises(GrafanaBadInputError)
+ with self.assertRaises(GrafanaBadInputError):
+ result = self.cli.search.search_dashboards()
|
grafana_api does not pass errors to caller anymore
**Describe the bug**
```
in grafana_api.py, line 114:
except Exception as error:
print('Caught this error: ' + repr(error))
```
you are catching all of the exceptions you previously raised, with the result that all issues are hidden from the code that actually uses grafana_api. This is pretty much broken.
According to the commit, this is supposed to fix unit tests. If this fixes the unit tests, then they are broken too...
The expected behavior is that these errors are raised to the caller.
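A sketch of what a caller expects to be able to write once the errors propagate again (exception and module names taken from the diff above):
```python
from grafana_api.grafana_api import GrafanaBadInputError, GrafanaClientError
from grafana_api.grafana_face import GrafanaFace

cli = GrafanaFace(("admin", "admin"), host="localhost")

try:
    cli.folder.create_folder(title="")
except GrafanaBadInputError as error:
    # The caller decides how to react; the library must not print and swallow it
    print(f"Folder title rejected: {error}")
except GrafanaClientError:
    raise
```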
|
0.0
|
bfbe1dd6a4e90e271b036444942faf1ad6b70784
|
[
"test/api/test_search.py::AnnotationsTestCase::test_search_dashboards_with_out_filter",
"test/api/test_folder.py::FolderTestCase::test_create_folder_empty_uid",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id_forbidden",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id_could_not_find",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id_bad_input",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id_unauthorized"
] |
[
"test/api/test_search.py::AnnotationsTestCase::test_search_dashboards",
"test/api/test_folder.py::FolderTestCase::test_get_folder_by_id",
"test/api/test_folder.py::FolderTestCase::test_update_folder",
"test/api/test_folder.py::FolderTestCase::test_get_folder_permissions",
"test/api/test_folder.py::FolderTestCase::test_delete_folder",
"test/api/test_folder.py::FolderTestCase::test_update_folder_permissions",
"test/api/test_folder.py::FolderTestCase::test_update_folder_some_param",
"test/api/test_folder.py::FolderTestCase::test_get_folder",
"test/api/test_folder.py::FolderTestCase::test_create_folder",
"test/api/test_folder.py::FolderTestCase::test_get_all_folders",
"test/api/test_annotations.py::AnnotationsTestCase::test_update_annotation",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id",
"test/api/test_annotations.py::AnnotationsTestCase::test_add_annotation",
"test/api/test_annotations.py::AnnotationsTestCase::test_annotations_with_out_param",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_region_id",
"test/api/test_annotations.py::AnnotationsTestCase::test_add_annotation_graphite",
"test/api/test_annotations.py::AnnotationsTestCase::test_annotations"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-08-19 17:18:02+00:00
|
mit
| 3,661 |
|
m0nhawk__grafana_api-43
|
diff --git a/grafana_api/grafana_api.py b/grafana_api/grafana_api.py
index e965768..bb7cc5e 100644
--- a/grafana_api/grafana_api.py
+++ b/grafana_api/grafana_api.py
@@ -56,9 +56,11 @@ class GrafanaAPI:
url_path_prefix="",
protocol="http",
verify=True,
+ timeout=5.0,
):
self.auth = auth
self.verify = verify
+ self.timeout = timeout
self.url_host = host
self.url_port = port
self.url_path_prefix = url_path_prefix
@@ -92,7 +94,7 @@ class GrafanaAPI:
__url = "%s%s" % (self.url, url)
runner = getattr(self.s, item.lower())
r = runner(
- __url, json=json, headers=headers, auth=self.auth, verify=self.verify
+ __url, json=json, headers=headers, auth=self.auth, verify=self.verify, timeout=self.timeout
)
if 500 <= r.status_code < 600:
raise GrafanaServerError(
diff --git a/grafana_api/grafana_face.py b/grafana_api/grafana_face.py
index dcc8667..f9fe53b 100644
--- a/grafana_api/grafana_face.py
+++ b/grafana_api/grafana_face.py
@@ -24,6 +24,7 @@ class GrafanaFace:
url_path_prefix="",
protocol="http",
verify=True,
+ timeout=5.0,
):
self.api = GrafanaAPI(
auth,
@@ -32,6 +33,7 @@ class GrafanaFace:
url_path_prefix=url_path_prefix,
protocol=protocol,
verify=verify,
+ timeout=timeout,
)
self.admin = Admin(self.api)
self.dashboard = Dashboard(self.api)
|
m0nhawk/grafana_api
|
b5f1266273fb836580b03224456843b043089814
|
diff --git a/test/test_grafana.py b/test/test_grafana.py
index d4affad..1c8ede3 100644
--- a/test/test_grafana.py
+++ b/test/test_grafana.py
@@ -67,8 +67,22 @@ class TestGrafanaAPI(unittest.TestCase):
headers=None,
json=None,
verify=False,
+ timeout=5.0,
)
+ def test_grafana_api_timeout(self):
+ cli = GrafanaFace(
+ ("admin", "admin"),
+ host="play.grafana.org",
+ url_path_prefix="",
+ protocol="https",
+ verify=False,
+ timeout=0.0001
+ )
+
+ with self.assertRaises(requests.exceptions.Timeout):
+ cli.folder.get_all_folders()
+
def test_grafana_api_basic_auth(self):
cli = GrafanaFace(
("admin", "admin"), host="localhost", url_path_prefix="", protocol="https",port="3000"
|
Missing timeouts
**Describe the bug**
The requests never time out, which is not a good idea in general
**Expected behavior**
The user should be able to set a timeout, and there should be a sensible default (maybe 10 s)
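For reference, `requests` accepts a per-call `timeout` in seconds and raises `requests.exceptions.Timeout` when it expires; a minimal sketch (illustrative URL) of what the fix wires through:
```python
import requests

try:
    response = requests.get("https://play.grafana.org/api/health", timeout=5.0)
except requests.exceptions.Timeout:
    print("Grafana did not answer within 5 seconds")
```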
|
0.0
|
b5f1266273fb836580b03224456843b043089814
|
[
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api_no_verify",
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api_timeout"
] |
[
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api",
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api_basic_auth",
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api_token_auth"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-09 02:16:17+00:00
|
mit
| 3,662 |
|
m3dev__gokart-147
|
diff --git a/gokart/redis_lock.py b/gokart/redis_lock.py
index d371b67..a287fd0 100644
--- a/gokart/redis_lock.py
+++ b/gokart/redis_lock.py
@@ -11,11 +11,11 @@ logger = getLogger(__name__)
class RedisParams(NamedTuple):
- redis_host: str = None
- redis_port: str = None
- redis_timeout: int = None
- redis_key: str = None
- should_redis_lock: bool = 180
+ redis_host: str
+ redis_port: str
+ redis_timeout: int
+ redis_key: str
+ should_redis_lock: bool
class RedisClient:
diff --git a/gokart/target.py b/gokart/target.py
index dc2cad1..c9d890c 100644
--- a/gokart/target.py
+++ b/gokart/target.py
@@ -77,7 +77,7 @@ class SingleFileTarget(TargetOnKart):
self,
target: luigi.target.FileSystemTarget,
processor: FileProcessor,
- redis_params: RedisParams = RedisParams(),
+ redis_params: RedisParams,
) -> None:
self._target = target
self._processor = processor
@@ -115,7 +115,7 @@ class ModelTarget(TargetOnKart):
temporary_directory: str,
load_function,
save_function,
- redis_params: RedisParams = RedisParams(),
+ redis_params: RedisParams,
) -> None:
self._zip_client = make_zip_client(file_path, temporary_directory)
self._temporary_directory = temporary_directory
diff --git a/gokart/task.py b/gokart/task.py
index 4aa99b2..15b3b62 100644
--- a/gokart/task.py
+++ b/gokart/task.py
@@ -61,8 +61,7 @@ class TaskOnKart(luigi.Task):
self._rerun_state = self.rerun
def output(self):
- file_path = self.__module__.replace(".", "/")
- return self.make_target(os.path.join(file_path, f"{type(self).__name__}.pkl"))
+ return self.make_target()
def requires(self):
tasks = self.make_task_instance_dictionary()
@@ -131,8 +130,10 @@ class TaskOnKart(luigi.Task):
return cls(**new_k)
- def make_target(self, relative_file_path: str, use_unique_id: bool = True, processor: Optional[FileProcessor] = None) -> TargetOnKart:
- file_path = os.path.join(self.workspace_directory, relative_file_path)
+ def make_target(self, relative_file_path: str = None, use_unique_id: bool = True, processor: Optional[FileProcessor] = None) -> TargetOnKart:
+ formatted_relative_file_path = relative_file_path if relative_file_path is not None else os.path.join(self.__module__.replace(".", "/"),
+ f"{type(self).__name__}.pkl")
+ file_path = os.path.join(self.workspace_directory, formatted_relative_file_path)
unique_id = self.make_unique_id() if use_unique_id else None
return gokart.target.make_target(file_path=file_path,
unique_id=unique_id,
@@ -141,8 +142,10 @@ class TaskOnKart(luigi.Task):
redis_port=self.redis_port,
redis_timeout=self.redis_timeout)
- def make_large_data_frame_target(self, relative_file_path: str, use_unique_id: bool = True, max_byte=int(2**26)) -> TargetOnKart:
- file_path = os.path.join(self.workspace_directory, relative_file_path)
+ def make_large_data_frame_target(self, relative_file_path: str = None, use_unique_id: bool = True, max_byte=int(2**26)) -> TargetOnKart:
+ formatted_relative_file_path = relative_file_path if relative_file_path is not None else os.path.join(self.__module__.replace(".", "/"),
+ f"{type(self).__name__}.zip")
+ file_path = os.path.join(self.workspace_directory, formatted_relative_file_path)
unique_id = self.make_unique_id() if use_unique_id else None
return gokart.target.make_model_target(file_path=file_path,
temporary_directory=self.local_temporary_directory,
|
m3dev/gokart
|
5a10506e5ef762d384fec1651e9cb56daa276336
|
diff --git a/test/test_task_on_kart.py b/test/test_task_on_kart.py
index 6b5a118..b1f4d10 100644
--- a/test/test_task_on_kart.py
+++ b/test/test_task_on_kart.py
@@ -153,6 +153,12 @@ class TaskTest(unittest.TestCase):
self.assertIsInstance(default_target, SingleFileTarget)
self.assertEqual(f'./resources/test/test_task_on_kart/_DummyTaskD_{task.task_unique_id}.pkl', default_target._target.path)
+ def test_default_large_dataframe_target(self):
+ task = _DummyTaskD()
+ default_large_dataframe_target = task.make_large_data_frame_target()
+ self.assertIsInstance(default_large_dataframe_target, ModelTarget)
+ self.assertEqual(f'./resources/test/test_task_on_kart/_DummyTaskD_{task.task_unique_id}.zip', default_large_dataframe_target._zip_client._file_path)
+
def test_make_target(self):
task = _DummyTask()
target = task.make_target('test.txt')
|
Default path for make_large_data_frame_target
In the same manner as `output()`, it would be great if `make_large_data_frame_target()` could produce a default path.
```
file_path = self.__module__.replace('.', '/')
return self.make_large_data_frame_target(os.path.join(file_path, f'{type(self).__name__}.zip'))
```
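A hedged usage sketch of the default added by the patch above (task name illustrative):
```python
import gokart

class MakeReport(gokart.TaskOnKart):
    def output(self):
        # No explicit path: falls back to <module_path>/MakeReport.zip,
        # with the task's unique id appended as usual
        return self.make_large_data_frame_target()
```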
|
0.0
|
5a10506e5ef762d384fec1651e9cb56daa276336
|
[
"test/test_task_on_kart.py::TaskTest::test_default_large_dataframe_target"
] |
[
"test/test_task_on_kart.py::TaskTest::test_add_cofigureation_evaluation_order",
"test/test_task_on_kart.py::TaskTest::test_add_configuration",
"test/test_task_on_kart.py::TaskTest::test_compare_targets_of_different_tasks",
"test/test_task_on_kart.py::TaskTest::test_complete_when_input_and_output_equal",
"test/test_task_on_kart.py::TaskTest::test_complete_when_modification_time_equals_output",
"test/test_task_on_kart.py::TaskTest::test_complete_with_modified_input",
"test/test_task_on_kart.py::TaskTest::test_complete_with_rerun_flag",
"test/test_task_on_kart.py::TaskTest::test_complete_with_uncompleted_input",
"test/test_task_on_kart.py::TaskTest::test_complete_without_dependency",
"test/test_task_on_kart.py::TaskTest::test_default_requires",
"test/test_task_on_kart.py::TaskTest::test_default_target",
"test/test_task_on_kart.py::TaskTest::test_dump",
"test/test_task_on_kart.py::TaskTest::test_load_data_frame_empty_input",
"test/test_task_on_kart.py::TaskTest::test_load_dictionary_at_once",
"test/test_task_on_kart.py::TaskTest::test_load_generator_with_single_target",
"test/test_task_on_kart.py::TaskTest::test_load_index_only_dataframe",
"test/test_task_on_kart.py::TaskTest::test_load_list_of_list_pandas",
"test/test_task_on_kart.py::TaskTest::test_load_tuple",
"test/test_task_on_kart.py::TaskTest::test_load_with_keyword",
"test/test_task_on_kart.py::TaskTest::test_load_with_single_target",
"test/test_task_on_kart.py::TaskTest::test_make_model_target",
"test/test_task_on_kart.py::TaskTest::test_make_target",
"test/test_task_on_kart.py::TaskTest::test_make_target_with_processor",
"test/test_task_on_kart.py::TaskTest::test_make_target_without_id",
"test/test_task_on_kart.py::TaskTest::test_repr",
"test/test_task_on_kart.py::TaskTest::test_significant_flag",
"test/test_task_on_kart.py::TaskTest::test_use_rerun_with_inherits"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-29 01:30:47+00:00
|
mit
| 3,663 |
|
macbre__sql-metadata-113
|
diff --git a/sql_metadata.py b/sql_metadata.py
index 46aa30b..4a754ed 100644
--- a/sql_metadata.py
+++ b/sql_metadata.py
@@ -171,7 +171,7 @@ def get_query_columns(query: str) -> List[str]:
return unique(columns)
-def _get_token_normalized_value(token: str) -> str:
+def _get_token_normalized_value(token: sqlparse.sql.Token) -> str:
return token.value.translate(str.maketrans("", "", " \n\t\r")).upper()
@@ -208,7 +208,7 @@ def _update_table_names(
"UPDATE",
"TABLE",
]
- and last_token not in ["AS"]
+ and last_token not in ["AS", "WITH"]
and token.value not in ["AS", "SELECT"]
):
if last_token == "." and next_token != ".":
@@ -289,16 +289,18 @@ def get_query_tables(query: str) -> List[str]:
tokens = get_query_tokens(query)
for index, token in enumerate(tokens):
- # print([token, token.ttype, last_token, last_keyword])
-
# remove whitespaces from token value and uppercase
token_val_norm = _get_token_normalized_value(token)
+
+ # print([token, token_val_norm, token.ttype, last_keyword])
+
if token.is_keyword and token_val_norm in table_syntax_keywords:
# keep the name of the last keyword, the next one can be a table name
last_keyword = token_val_norm
# print('keyword', last_keyword)
- elif str(token) == "(":
+ elif str(token) == "(" and last_keyword in ["INTO", "VALUES"]:
# reset the last_keyword for INSERT `foo` VALUES(id, bar) ...
+ # reset the last_keyword for INSERT `foo` (col1, col2) VALUES(id, bar) ...
last_keyword = None
elif token.is_keyword and token_val_norm in ["FORCE", "ORDER", "GROUPBY"]:
# reset the last_keyword for queries like:
|
macbre/sql-metadata
|
aeed2afacdf6367c6671f0497e682ee9852582d3
|
diff --git a/test/test_hive.py b/test/test_hive.py
index 7833a98..0306852 100644
--- a/test/test_hive.py
+++ b/test/test_hive.py
@@ -1,6 +1,8 @@
"""
Set of unit tests for handling of Apache Hive queries
"""
+import pytest
+
from sql_metadata import get_query_columns, get_query_tables
@@ -17,6 +19,8 @@ def test_insert_overwrite_table():
def test_complex_hive_query():
+ pytest.skip("Improve HIVE syntax handling with a new parser (#98)")
+
# https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML#LanguageManualDML-InsertingdataintoHiveTablesfromqueries
dag = """
INSERT OVERWRITE TABLE foo_report
diff --git a/test/test_query.py b/test/test_query.py
index 5ba52eb..980bbb3 100644
--- a/test/test_query.py
+++ b/test/test_query.py
@@ -1,3 +1,5 @@
+import pytest
+
from sql_metadata import (
preprocess_query,
get_query_tokens,
@@ -577,3 +579,41 @@ def test_unions():
assert ["d", "g"] == get_query_tables(
"SELECT a,b,c FROM d UNION ALL SELECT e,f FROM g"
)
+
+
+def test_with_brackets():
+ assert ["database1.table1", "database2.table2"] == get_query_tables(
+ """
+ SELECT
+ "xxxxx"
+ FROM
+ (database1.table1 alias
+ LEFT JOIN database2.table2 ON ("tt"."ttt"."fff" = "xx"."xxx"))
+ """
+ )
+
+ assert ["inner_table"] == get_query_tables(
+ """
+ SELECT
+ t.foo
+ FROM
+ (SELECT foo FROM inner_table
+ WHERE bar = '1') t
+ """
+ )
+
+
+def test_with_with():
+ pytest.skip("Improve WITH syntax handling with a new parser (#98)")
+
+ assert ["table3", "database2.table2"] == get_query_tables(
+ """
+ WITH
+ database1.tableFromWith AS SELECT * FROM table3
+ SELECT
+ "xxxxx"
+ FROM
+ database1.tableFromWith alias
+ LEFT JOIN database2.table2 ON ("tt"."ttt"."fff" = "xx"."xxx")
+ """
+ )
|
tables missing from the result of sql_metadata.get_query_tables
Hello,
I have 80 long queries, and for 27 of them the results from `sql_metadata.get_query_tables(ddl)` are not correct.
Here is an example:
ddl_for_sql_metadata.sql
```
SELECT
"attr1"
FROM
(
SELECT
"attr2"
FROM
(database1.table1 "aliasTable"
LEFT JOIN (
SELECT
"attr3"
FROM
((
SELECT
"attr4"
FROM
database2.table2
GROUP BY 1, 2, 3, 4
) "aliasTable2"
LEFT JOIN (
SELECT
"attr5"
FROM
database3.table3
GROUP BY 1, 2
) "X" ON ("table"."attr" = "table"."attr"))
) "Y" ON ("table"."attr" = "table"."attr"))
) "Z"
WHERE (myCondition)
GROUP BY 1, 2, 3, 4, 5
```
Quick way to test:
```
import sql_metadata
def read_from_file(file_path):
return open(file_path, "r").read()
ddl = read_from_file("ddl_for_sql_metadata.sql")
tables = sql_metadata.get_query_tables(ddl)
print(tables)
```
It returns only `['database2.table2', 'database3.table3']`,
but it should also return `database1.table1`.
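A reduced reproduction (the parenthesised JOIN is what trips the parser; expected output per the new test above):
```python
from sql_metadata import get_query_tables

query = """
SELECT "xxxxx"
FROM (database1.table1 alias
      LEFT JOIN database2.table2 ON ("tt"."ttt"."fff" = "xx"."xxx"))
"""
print(get_query_tables(query))  # ['database1.table1', 'database2.table2']
```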
|
0.0
|
aeed2afacdf6367c6671f0497e682ee9852582d3
|
[
"test/test_query.py::test_with_brackets"
] |
[
"test/test_hive.py::test_insert_overwrite_table",
"test/test_query.py::test_get_query_tokens",
"test/test_query.py::test_preprocess_query",
"test/test_query.py::test_get_query_tables",
"test/test_query.py::test_case_insensitive",
"test/test_query.py::test_joins",
"test/test_query.py::test_handle_force_index",
"test/test_query.py::test_get_query_limit_and_offset",
"test/test_query.py::test_insert_into_select",
"test/test_query.py::test_cast_and_convert_functions",
"test/test_query.py::test_case_syntax",
"test/test_query.py::test_select_aliases",
"test/test_query.py::test_multiline_queries",
"test/test_query.py::test_redshift",
"test/test_query.py::test_sql_server_cte",
"test/test_query.py::test_sql_server_cte_sales_by_year",
"test/test_query.py::test_table_name_with_group_by",
"test/test_query.py::test_datasets",
"test/test_query.py::test_table_names_with_dashes",
"test/test_query.py::test_queries_with_null_conditions",
"test/test_query.py::test_queries_with_distinct",
"test/test_query.py::test_unions"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-03-22 16:58:38+00:00
|
mit
| 3,664 |
|
macbre__sql-metadata-127
|
diff --git a/sql_metadata/parser.py b/sql_metadata/parser.py
index 472ad57..c6a7548 100644
--- a/sql_metadata/parser.py
+++ b/sql_metadata/parser.py
@@ -57,7 +57,7 @@ class Parser: # pylint: disable=R0902
@property
def tokens(self) -> List[SQLToken]:
"""
- :rtype: list[SQLToken]
+ Tokenizes the query
"""
if self._tokens is not None:
return self._tokens
@@ -129,7 +129,7 @@ class Parser: # pylint: disable=R0902
@property
def columns(self) -> List[str]:
"""
- :rtype: list[str]
+ Returns the list columns this query refers to
"""
if self._columns is not None:
return self._columns
@@ -144,10 +144,16 @@ class Parser: # pylint: disable=R0902
token.last_keyword_normalized in KEYWORDS_BEFORE_COLUMNS
and token.previous_token.normalized != "AS"
):
- if token.normalized not in FUNCTIONS_IGNORED and not (
- # aliases of sub-queries i.e.: select from (...) <alias>
- token.previous_token.is_right_parenthesis
- and token.value in subqueries_names
+ if (
+ token.normalized not in FUNCTIONS_IGNORED
+ and not (
+ # aliases of sub-queries i.e.: select from (...) <alias>
+ token.previous_token.is_right_parenthesis
+ and token.value in subqueries_names
+ )
+ # custom functions - they are followed by the parenthesis
+ # e.g. custom_func(...
+ and not token.next_token.is_left_parenthesis
):
column = token.table_prefixed_column(tables_aliases)
self._add_to_columns_subsection(
@@ -204,7 +210,7 @@ class Parser: # pylint: disable=R0902
@property
def tables(self) -> List[str]:
"""
- :rtype: list[str]
+ Return the list of tables this query refers to
"""
if self._tables is not None:
return self._tables
@@ -247,8 +253,6 @@ class Parser: # pylint: disable=R0902
def limit_and_offset(self) -> Optional[Tuple[int, int]]:
"""
Returns value for limit and offset if set
-
- :rtype: (int, int)
"""
if self._limit_and_offset is not None:
return self._limit_and_offset
@@ -447,8 +451,6 @@ class Parser: # pylint: disable=R0902
def comments(self) -> List[str]:
"""
Return comments from SQL query
-
- :rtype: List[str]
"""
return Generalizator(self._raw_query).comments
@@ -456,8 +458,6 @@ class Parser: # pylint: disable=R0902
def without_comments(self) -> str:
"""
Removes comments from SQL query
-
- :rtype: str
"""
return Generalizator(self._raw_query).without_comments
@@ -468,8 +468,6 @@ class Parser: # pylint: disable=R0902
and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
-
- :rtype: Optional[str]
"""
return Generalizator(self._raw_query).generalize
@@ -489,8 +487,6 @@ class Parser: # pylint: disable=R0902
def _preprocess_query(self) -> str:
"""
Perform initial query cleanup
-
- :rtype str
"""
if self._raw_query == "":
return ""
|
macbre/sql-metadata
|
dce28a7f5dd99286664f3ea2d82b1c0757f89a14
|
diff --git a/test/test_getting_columns.py b/test/test_getting_columns.py
index 6802691..4d50eb0 100644
--- a/test/test_getting_columns.py
+++ b/test/test_getting_columns.py
@@ -210,3 +210,18 @@ def test_complex_queries_columns():
"where": ["cl_type", "cl_to"],
"order_by": ["cl_sortkey"],
}
+
+
+def test_columns_and_sql_functions():
+ """
+ See https://github.com/macbre/sql-metadata/issues/125
+ """
+ assert Parser("select max(col3)+avg(col)+1+sum(col2) from dual").columns == [
+ "col3",
+ "col",
+ "col2",
+ ]
+ assert Parser("select avg(col)+sum(col2) from dual").columns == ["col", "col2"]
+ assert Parser(
+ "select count(col)+max(col2)+ min(col3)+ count(distinct col4) + custom_func(col5) from dual"
+ ).columns == ["col", "col2", "col3", "col4", "col5"]
|
sql_metadata.get_query_columns result error
Demo:
```
sql_metadata.get_query_columns("select max(col3)+avg(col)+1+sum(col2) from dual")
// result : ['col3', 'avg', 'col', 'sum', 'col2']. avg sum shouldn't be a column
sql_metadata.get_query_columns("select avg(col)+sum(col2) from dual")
// result: ['avg', 'col', 'sum', 'col2'] avg sum shouldn't be a column
sql_metadata.get_query_columns("select count(col)+max(col2)+ min(col3)+ count(distinct col4) + custom_func(col5) from dual")
// result ['col', 'col2', 'col3', 'col4', 'custom_func', 'col5'] custom_func shouldn't be a column
```
It looks like functions are matched against a keyword list; I recommend matching on the function-call pattern (a name followed by parentheses) instead.
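Expected behaviour after the fix, per the new test above (using the `Parser` API the tests import from the package root):
```python
from sql_metadata import Parser

print(Parser("select max(col3)+avg(col)+1+sum(col2) from dual").columns)
# ['col3', 'col', 'col2']
print(Parser("select count(col)+max(col2)+ min(col3)+ count(distinct col4)"
             " + custom_func(col5) from dual").columns)
# ['col', 'col2', 'col3', 'col4', 'col5']
```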
|
0.0
|
dce28a7f5dd99286664f3ea2d82b1c0757f89a14
|
[
"test/test_getting_columns.py::test_columns_and_sql_functions"
] |
[
"test/test_getting_columns.py::test_cast_and_convert_functions",
"test/test_getting_columns.py::test_queries_with_null_conditions",
"test/test_getting_columns.py::test_queries_with_distinct",
"test/test_getting_columns.py::test_joins",
"test/test_getting_columns.py::test_getting_columns",
"test/test_getting_columns.py::test_columns_with_order_by",
"test/test_getting_columns.py::test_update_and_replace",
"test/test_getting_columns.py::test_complex_queries_columns"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-30 10:09:01+00:00
|
mit
| 3,665 |
|
macbre__sql-metadata-140
|
diff --git a/sql_metadata/parser.py b/sql_metadata/parser.py
index 887c505..2ad3e07 100644
--- a/sql_metadata/parser.py
+++ b/sql_metadata/parser.py
@@ -5,8 +5,8 @@ import re
from typing import Dict, List, Optional, Tuple, Union
import sqlparse
-from sqlparse.sql import TokenList
-from sqlparse.tokens import Whitespace
+from sqlparse.sql import Token
+from sqlparse.tokens import Name, Number, Whitespace
from sql_metadata.generalizator import Generalizator
from sql_metadata.keywords_lists import (
@@ -18,7 +18,7 @@ from sql_metadata.keywords_lists import (
TABLE_ADJUSTMENT_KEYWORDS,
WITH_ENDING_KEYWORDS,
)
-from sql_metadata.token import SQLToken, EmptyToken
+from sql_metadata.token import EmptyToken, SQLToken
from sql_metadata.utils import UniqueList
@@ -59,6 +59,9 @@ class Parser: # pylint: disable=R0902
self._is_in_nested_function = False
self._is_in_with_block = False
self._with_columns_candidates = dict()
+ self._column_aliases_max_subquery_level = dict()
+
+ self.sqlparse_tokens = None
@property
def query(self) -> str:
@@ -81,7 +84,8 @@ class Parser: # pylint: disable=R0902
if not parsed:
return tokens
- sqlparse_tokens = TokenList(parsed[0].tokens).flatten()
+ self.sqlparse_tokens = parsed[0].tokens
+ sqlparse_tokens = self._flatten_sqlparse()
non_empty_tokens = [
token for token in sqlparse_tokens if token.ttype is not Whitespace
]
@@ -145,10 +149,15 @@ class Parser: # pylint: disable=R0902
if (
token.last_keyword_normalized in KEYWORDS_BEFORE_COLUMNS
and token.previous_token.normalized not in ["AS", ")"]
- and token.previous_token.table_prefixed_column(tables_aliases)
- not in columns
- and token.left_expanded not in self.columns_aliases_names
+ and not token.is_alias_without_as
+ and (
+ token.left_expanded not in self.columns_aliases_names
+ or token.token_is_alias_of_self_not_from_subquery(
+ aliases_levels=self._column_aliases_max_subquery_level
+ )
+ )
):
+
if (
token.normalized not in FUNCTIONS_IGNORED
and not (
@@ -331,6 +340,13 @@ class Parser: # pylint: disable=R0902
) and token.value not in with_names + subqueries_names:
alias = token.left_expanded
column_aliases_names.append(alias)
+ current_level = self._column_aliases_max_subquery_level.setdefault(
+ alias, 0
+ )
+ if token.subquery_level > current_level:
+ self._column_aliases_max_subquery_level[
+ alias
+ ] = token.subquery_level
self._columns_aliases_names = column_aliases_names
return self._columns_aliases_names
@@ -726,7 +742,7 @@ class Parser: # pylint: disable=R0902
Returns a list of columns between tw tokens
"""
loop_token = start_token
- aliases = []
+ aliases = UniqueList()
while loop_token.next_token != end_token:
if loop_token.next_token.left_expanded in self._aliases_to_check:
alias_token = loop_token.next_token
@@ -744,13 +760,47 @@ class Parser: # pylint: disable=R0902
# 0. remove newlines
query = self._raw_query.replace("\n", " ")
# 1. remove quotes "
- query = query.replace('"', "")
+ query = query.replace('"', "`")
# 2. `database`.`table` notation -> database.table
query = re.sub(r"`([^`]+)`\.`([^`]+)`", r"\1.\2", query)
return query
+ def _flatten_sqlparse(self):
+ for token in self.sqlparse_tokens:
+ # sqlparse returns mysql digit starting identifiers as group
+ # check https://github.com/andialbrecht/sqlparse/issues/337
+ is_grouped_mysql_digit_name = (
+ token.is_group
+ and len(token.tokens) == 2
+ and token.tokens[0].ttype is Number.Integer
+ and (
+ token.tokens[1].is_group and token.tokens[1].tokens[0].ttype is Name
+ )
+ )
+ if token.is_group and not is_grouped_mysql_digit_name:
+ yield from token.flatten()
+ elif is_grouped_mysql_digit_name:
+ # we have digit starting name
+ new_tok = Token(
+ value=f"{token.tokens[0].normalized}"
+ f"{token.tokens[1].tokens[0].normalized}",
+ ttype=token.tokens[1].tokens[0].ttype,
+ )
+ new_tok.parent = token.parent
+ yield new_tok
+ if len(token.tokens[1].tokens) > 1:
+ # unfortunately there might be nested groups
+ remaining_tokens = token.tokens[1].tokens[1:]
+ for tok in remaining_tokens:
+ if tok.is_group:
+ yield from tok.flatten()
+ else:
+ yield tok
+ else:
+ yield token
+
@property
def _is_create_table_query(self) -> bool:
"""
diff --git a/sql_metadata/token.py b/sql_metadata/token.py
index e59e6c0..8f8a044 100644
--- a/sql_metadata/token.py
+++ b/sql_metadata/token.py
@@ -161,6 +161,22 @@ class SQLToken: # pylint: disable=R0902
and not self.previous_token.is_comment
)
+ @property
+ def is_alias_of_self(self) -> bool:
+ """
+ Checks if a given token is an alias but at the same time
+ is also an alias of self, so not really an alias
+ """
+
+ end_of_column = self.find_nearest_token(
+ [",", "FROM"], value_attribute="normalized", direction="right"
+ )
+ while end_of_column.is_in_nested_function:
+ end_of_column = end_of_column.find_nearest_token(
+ [",", "FROM"], value_attribute="normalized", direction="right"
+ )
+ return end_of_column.previous_token.normalized == self.normalized
+
@property
def is_in_with_columns(self) -> bool:
"""
@@ -171,6 +187,17 @@ class SQLToken: # pylint: disable=R0902
and self.find_nearest_token(")", direction="right").is_with_columns_end
)
+ def token_is_alias_of_self_not_from_subquery(self, aliases_levels: Dict) -> bool:
+ """
+ Checks if token is also an alias, but is an alias of self that is not
+ coming from a subquery, that means it's a valid column
+ """
+ return (
+ self.last_keyword_normalized == "SELECT"
+ and self.is_alias_of_self
+ and self.subquery_level == aliases_levels[self.value]
+ )
+
def table_prefixed_column(self, table_aliases: Dict) -> str:
"""
Substitutes table alias with actual table name
|
macbre/sql-metadata
|
c8062f08684268f5e1822b52ce7f58567982737b
|
diff --git a/test/test_getting_columns.py b/test/test_getting_columns.py
index 26c0ddd..6fd4d8b 100644
--- a/test/test_getting_columns.py
+++ b/test/test_getting_columns.py
@@ -250,3 +250,103 @@ def test_columns_and_sql_functions():
assert Parser(
"select count(col)+max(col2)+ min(col3)+ count(distinct col4) + custom_func(col5) from dual"
).columns == ["col", "col2", "col3", "col4", "col5"]
+
+
+def test_columns_starting_with_keywords():
+ query = """
+ SELECT `schema_name`, full_table_name, `column_name`, `catalog_name`,
+ `table_name`, column_length, column_weight, annotation
+ FROM corporate.all_tables
+ """
+ parser = Parser(query)
+ assert parser.columns == [
+ "schema_name",
+ "full_table_name",
+ "column_name",
+ "catalog_name",
+ "table_name",
+ "column_length",
+ "column_weight",
+ "annotation",
+ ]
+
+
+def test_columns_with_keywords_parts():
+ query = """
+ SELECT column_length, column_weight, table_random, drop_20, create_table
+ FROM sample_table
+ """
+ assert Parser(query).columns == [
+ "column_length",
+ "column_weight",
+ "table_random",
+ "drop_20",
+ "create_table",
+ ]
+
+
+def test_columns_with_complex_aliases_same_as_columns():
+ query = """
+ select targetingtype, sellerid, sguid, 'd01' as datetype, adgroupname, targeting,
+ customersearchterm,
+ 'product_search_term' as `type`,
+ sum(impressions) as impr,
+ sum(clicks) as clicks,
+ sum(seventotalunits) as sold,
+ sum(sevenadvertisedskuunits) as advertisedskuunits,
+ sum(sevenotherskuunits) as otherskuunits,
+ sum(sevendaytotalsales) as totalsales,
+ round(sum(spend), 4) as spend, if(sum(impressions) > 0,
+ round(sum(clicks)/sum(impressions), 4), 0) as ctr,
+ if(sum(clicks) > 0, round(sum(seventotalunits)/sum(clicks), 4), 0) as cr,
+ if(sum(clicks) > 0, round(sum(spend)/sum(clicks), 2), 0) as cpc
+ from amazon_pl.search_term_report_impala
+ where reportday >= to_date('2021-05-16 00:00:00.0')
+ and reportday <= to_date('2021-05-16 00:00:00.0')
+ and targetingtype in ('auto','manual')
+ and sguid is not null and sguid != ''
+ group by targetingtype,sellerid,sguid,adgroupname,targeting,customersearchterm
+ order by impr desc
+ """
+ parser = Parser(query)
+ assert parser.columns == [
+ "targetingtype",
+ "sellerid",
+ "sguid",
+ "adgroupname",
+ "targeting",
+ "customersearchterm",
+ "impressions",
+ "clicks",
+ "seventotalunits",
+ "sevenadvertisedskuunits",
+ "sevenotherskuunits",
+ "sevendaytotalsales",
+ "spend",
+ "reportday",
+ ]
+
+
+def test_columns_with_aliases_same_as_columns():
+ query = """
+ select
+ round(sum(impressions),1) as impressions,
+ sum(clicks) as clicks
+ from amazon_pl.search_term_report_impala
+ """
+ parser = Parser(query)
+ assert parser.columns == ["impressions", "clicks"]
+ assert parser.columns_aliases == {}
+
+ query = """
+ select
+ if(sum(clicks) > 0, round(sum(seventotalunits)/sum(clicks), 4), 0) as clicks,
+ if(sum(clicks) > 0, round(sum(spend)/sum(clicks), 2), 0) as cpc
+ from amazon_pl.search_term_report_impala
+ """
+ parser = Parser(query)
+ assert parser.columns == ["clicks", "seventotalunits", "spend"]
+ assert parser.columns_aliases == {
+ "clicks": ["clicks", "seventotalunits"],
+ "cpc": ["clicks", "spend"],
+ }
diff --git a/test/test_getting_tables.py b/test/test_getting_tables.py
index c9bcc16..9657e64 100644
--- a/test/test_getting_tables.py
+++ b/test/test_getting_tables.py
@@ -418,3 +418,29 @@ with ur
"zekerheid_accijnsbeweging": "accounting_entity.remainder",
"zekerheid_douanevervoer": "accounting_entity.remainder",
}
+
+
+def test_get_tables_with_leading_digits():
+ # see #139
+
+ # Identifiers may begin with a digit but unless quoted may not consist solely of digits.
+ assert ["0020"] == Parser("SELECT * FROM `0020`").tables
+
+ assert ["0020_big_table"] == Parser(
+ "SELECT t.val as value, count(*) FROM `0020_big_table` as t WHERE id BETWEEN 10 AND 20 GROUP BY val"
+ ).tables
+ assert ["0020_big_table"] == Parser(
+ "SELECT t.val as value, count(*) FROM `0020_big_table`"
+ ).tables
+ assert ["0020_big_table"] == Parser(
+ 'SELECT t.val as value, count(*) FROM "0020_big_table" as t WHERE id BETWEEN 10 AND 20 GROUP BY val'
+ ).tables
+ assert ["0020_big_table"] == Parser(
+ "SELECT t.val as value, count(*) FROM 0020_big_table as t WHERE id BETWEEN 10 AND 20 GROUP BY val"
+ ).tables
+ assert ["0020_big_table"] == Parser(
+ "SELECT t.val as value, count(*) FROM `0020_big_table` as t WHERE id BETWEEN 10 AND 20 GROUP BY val"
+ ).tables
+ assert ["0020_big_table"] == Parser(
+ "SELECT t.val as value, count(*) FROM 0020_big_table"
+ ).tables
diff --git a/test/test_unions.py b/test/test_unions.py
new file mode 100644
index 0000000..f0506be
--- /dev/null
+++ b/test/test_unions.py
@@ -0,0 +1,81 @@
+from sql_metadata import Parser
+
+
+def test_union():
+ query = """
+ SELECT
+ACCOUNTING_ENTITY.VERSION as "accountingEntityVersion",
+ACCOUNTING_ENTITY.ACTIVE as "active",
+ACCOUNTING_ENTITY.CATEGORY as "category",
+ACCOUNTING_ENTITY.CREATION_DATE as "creationDate",
+ACCOUNTING_ENTITY.DESCRIPTION as "description",
+ACCOUNTING_ENTITY.ID as "accountingEntityId",
+ACCOUNTING_ENTITY.MINIMAL_REMAINDER as "minimalRemainder",
+ACCOUNTING_ENTITY.REMAINDER as "remainder",
+ACCOUNTING_ENTITY.SYSTEM_TYPE_ID as "aeSystemTypeId",
+ACCOUNTING_ENTITY.DATE_CREATION as "dateCreation",
+ACCOUNTING_ENTITY.DATE_LAST_MODIFICATION as "dateLastModification",
+ACCOUNTING_ENTITY.USER_CREATION as "userCreation",
+ACCOUNTING_ENTITY.USER_LAST_MODIFICATION as "userLastModification"
+FROM ACCOUNTING_ENTITY
+WHERE ACCOUNTING_ENTITY.ID IN (
+SELECT DPD.ACCOUNTING_ENTITY_ID AS "ACCOUNTINGENTITYID" FROM DEBT D
+INNER JOIN DUTY_PER_DEBT DPD ON DPD.DEBT_ID = D.ID
+INNER JOIN DECLARATION_V2 DV2 ON DV2.ID = D.DECLARATION_ID
+WHERE DV2.DECLARATION_REF = #MRNFORMOVEMENT
+UNION
+SELECT BX.ACCOUNTING_ENTITY_ID AS "ACCOUNTINGENTITYID" FROM BENELUX BX
+INNER JOIN DECLARATION_V2 DV2 ON DV2.ID = BX.DECLARATION_ID
+WHERE DV2.DECLARATION_REF = #MRNFORMOVEMENT
+UNION
+SELECT CA4D.ACCOUNTING_ENTITY_ID AS "ACCOUNTINGENTITYID" FROM RESERVATION R
+INNER JOIN CA4_RESERVATIONS_DECLARATION CA4D ON CA4D.ID = R.CA4_ID
+INNER JOIN DECLARATION_V2 DV2 ON DV2.ID = R.DECLARATION_ID
+WHERE DV2.DECLARATION_REF = #MRNFORMOVEMENT)
+ """
+
+ parser = Parser(query)
+ assert parser.tables == [
+ "ACCOUNTING_ENTITY",
+ "DEBT",
+ "DUTY_PER_DEBT",
+ "DECLARATION_V2",
+ "BENELUX",
+ "RESERVATION",
+ "CA4_RESERVATIONS_DECLARATION",
+ ]
+ assert parser.columns_dict == {
+ "join": [
+ "DUTY_PER_DEBT.DEBT_ID",
+ "DEBT.ID",
+ "DECLARATION_V2.ID",
+ "DEBT.DECLARATION_ID",
+ "BENELUX.DECLARATION_ID",
+ "CA4_RESERVATIONS_DECLARATION.ID",
+ "RESERVATION.CA4_ID",
+ "RESERVATION.DECLARATION_ID",
+ ],
+ "select": [
+ "ACCOUNTING_ENTITY.VERSION",
+ "ACCOUNTING_ENTITY.ACTIVE",
+ "ACCOUNTING_ENTITY.CATEGORY",
+ "ACCOUNTING_ENTITY.CREATION_DATE",
+ "ACCOUNTING_ENTITY.DESCRIPTION",
+ "ACCOUNTING_ENTITY.ID",
+ "ACCOUNTING_ENTITY.MINIMAL_REMAINDER",
+ "ACCOUNTING_ENTITY.REMAINDER",
+ "ACCOUNTING_ENTITY.SYSTEM_TYPE_ID",
+ "ACCOUNTING_ENTITY.DATE_CREATION",
+ "ACCOUNTING_ENTITY.DATE_LAST_MODIFICATION",
+ "ACCOUNTING_ENTITY.USER_CREATION",
+ "ACCOUNTING_ENTITY.USER_LAST_MODIFICATION",
+ "DUTY_PER_DEBT.ACCOUNTING_ENTITY_ID",
+ "BENELUX.ACCOUNTING_ENTITY_ID",
+ "CA4_RESERVATIONS_DECLARATION.ACCOUNTING_ENTITY_ID",
+ ],
+ "where": [
+ "ACCOUNTING_ENTITY.ID",
+ "DECLARATION_V2.DECLARATION_REF",
+ "#MRNFORMOVEMENT",
+ ],
+ }
|
Tables with leading digits are not properly recognized
https://github.com/macbre/index-digest/runs/2567720826
```
ERROR indexdigest.linters.linter_0006_not_used_columns_and_tables:linter_0006_not_used_columns_and_tables.py:91 Unable to extract tables and columns used from the query: SELECT test
ERROR indexdigest.linters.linter_0006_not_used_columns_and_tables:linter_0006_not_used_columns_and_tables.py:91 Unable to extract tables and columns used from the query: SELECT t.val as value, count(*) FROM 0020_big_table as t WHERE id BETWEEN 10 AND 20 GROUP BY val
```
```python
Parser("SELECT t.val as value, count(*) FROM 0020_big_table as t WHERE id BETWEEN 10 AND 20 GROUP BY val").tables # []
```
However:
```python
Parser("SELECT t.val as value, count(*) FROM `0020_big_table` as t WHERE id BETWEEN 10 AND 20 GROUP BY val").tables # ['0020_big_table']
```
> **Identifiers may begin with a digit** but unless quoted may not consist solely of digits.
>
> https://dev.mysql.com/doc/refman/8.0/en/identifiers.html
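For background, a minimal sketch with plain sqlparse (the grouping behavior is the one described in andialbrecht/sqlparse#337) that shows how the unquoted digit-leading identifier gets tokenized:
```python
import sqlparse

# sqlparse splits the unquoted digit-leading identifier into an
# Integer token followed by a grouped Name token (sqlparse#337),
# which is why a plain flatten() loses the combined name
statement = sqlparse.parse("SELECT * FROM 0020_big_table")[0]
for token in statement.tokens:
    print(repr(token.ttype), repr(token.value))
```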
|
0.0
|
c8062f08684268f5e1822b52ce7f58567982737b
|
[
"test/test_getting_columns.py::test_columns_with_complex_aliases_same_as_columns",
"test/test_getting_columns.py::test_columns_with_aliases_same_as_columns",
"test/test_getting_tables.py::test_get_tables_with_leading_digits"
] |
[
"test/test_getting_columns.py::test_cast_and_convert_functions",
"test/test_getting_columns.py::test_queries_with_null_conditions",
"test/test_getting_columns.py::test_queries_with_distinct",
"test/test_getting_columns.py::test_joins",
"test/test_getting_columns.py::test_getting_columns",
"test/test_getting_columns.py::test_columns_with_order_by",
"test/test_getting_columns.py::test_update_and_replace",
"test/test_getting_columns.py::test_complex_queries_columns",
"test/test_getting_columns.py::test_columns_with_comments",
"test/test_getting_columns.py::test_columns_with_keyword_aliases",
"test/test_getting_columns.py::test_columns_and_sql_functions",
"test/test_getting_columns.py::test_columns_starting_with_keywords",
"test/test_getting_columns.py::test_columns_with_keywords_parts",
"test/test_getting_tables.py::test_simple_queries_tables",
"test/test_getting_tables.py::test_complex_query_tables",
"test/test_getting_tables.py::test_joins",
"test/test_getting_tables.py::test_quoted_names",
"test/test_getting_tables.py::test_update_and_replace",
"test/test_getting_tables.py::test_order_bys",
"test/test_getting_tables.py::test_three_part_qualified_names",
"test/test_getting_tables.py::test_insert_queries",
"test/test_getting_tables.py::test_select_aliases",
"test/test_getting_tables.py::test_table_name_with_group_by",
"test/test_getting_tables.py::test_datasets",
"test/test_getting_tables.py::test_queries_with_distinct",
"test/test_getting_tables.py::test_table_names_with_dashes",
"test/test_getting_tables.py::test_unions",
"test/test_getting_tables.py::test_with_brackets",
"test/test_getting_tables.py::test_db2_query",
"test/test_unions.py::test_union"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-05-13 08:43:58+00:00
|
mit
| 3,666 |
|
macbre__sql-metadata-143
|
diff --git a/sql_metadata/parser.py b/sql_metadata/parser.py
index 887c505..10be534 100644
--- a/sql_metadata/parser.py
+++ b/sql_metadata/parser.py
@@ -5,8 +5,8 @@ import re
from typing import Dict, List, Optional, Tuple, Union
import sqlparse
-from sqlparse.sql import TokenList
-from sqlparse.tokens import Whitespace
+from sqlparse.sql import Token
+from sqlparse.tokens import Name, Number, Whitespace
from sql_metadata.generalizator import Generalizator
from sql_metadata.keywords_lists import (
@@ -18,7 +18,7 @@ from sql_metadata.keywords_lists import (
TABLE_ADJUSTMENT_KEYWORDS,
WITH_ENDING_KEYWORDS,
)
-from sql_metadata.token import SQLToken, EmptyToken
+from sql_metadata.token import EmptyToken, SQLToken
from sql_metadata.utils import UniqueList
@@ -60,6 +60,8 @@ class Parser: # pylint: disable=R0902
self._is_in_with_block = False
self._with_columns_candidates = dict()
+ self.sqlparse_tokens = None
+
@property
def query(self) -> str:
"""
@@ -81,7 +83,8 @@ class Parser: # pylint: disable=R0902
if not parsed:
return tokens
- sqlparse_tokens = TokenList(parsed[0].tokens).flatten()
+ self.sqlparse_tokens = parsed[0].tokens
+ sqlparse_tokens = self._flatten_sqlparse()
non_empty_tokens = [
token for token in sqlparse_tokens if token.ttype is not Whitespace
]
@@ -744,13 +747,47 @@ class Parser: # pylint: disable=R0902
# 0. remove newlines
query = self._raw_query.replace("\n", " ")
# 1. remove quotes "
- query = query.replace('"', "")
+ query = query.replace('"', "`")
# 2. `database`.`table` notation -> database.table
query = re.sub(r"`([^`]+)`\.`([^`]+)`", r"\1.\2", query)
return query
+ def _flatten_sqlparse(self):
+ for token in self.sqlparse_tokens:
+ # sqlparse returns MySQL digit-starting identifiers as a group
+ # check https://github.com/andialbrecht/sqlparse/issues/337
+ is_grouped_mysql_digit_name = (
+ token.is_group
+ and len(token.tokens) == 2
+ and token.tokens[0].ttype is Number.Integer
+ and (
+ token.tokens[1].is_group and token.tokens[1].tokens[0].ttype is Name
+ )
+ )
+ if token.is_group and not is_grouped_mysql_digit_name:
+ yield from token.flatten()
+ elif is_grouped_mysql_digit_name:
+ # we have a digit-starting name
+ new_tok = Token(
+ value=f"{token.tokens[0].normalized}"
+ f"{token.tokens[1].tokens[0].normalized}",
+ ttype=token.tokens[1].tokens[0].ttype,
+ )
+ new_tok.parent = token.parent
+ yield new_tok
+ if len(token.tokens[1].tokens) > 1:
+ # unfortunately there might be nested groups
+ remaining_tokens = token.tokens[1].tokens[1:]
+ for tok in remaining_tokens:
+ if tok.is_group:
+ yield from tok.flatten()
+ else:
+ yield tok
+ else:
+ yield token
+
@property
def _is_create_table_query(self) -> bool:
"""
|
macbre/sql-metadata
|
e8b2a73da290e893fcef268c1c6baaef811f5479
|
diff --git a/test/test_getting_columns.py b/test/test_getting_columns.py
index 26c0ddd..9ba60d4 100644
--- a/test/test_getting_columns.py
+++ b/test/test_getting_columns.py
@@ -250,3 +250,16 @@ def test_columns_and_sql_functions():
assert Parser(
"select count(col)+max(col2)+ min(col3)+ count(distinct col4) + custom_func(col5) from dual"
).columns == ["col", "col2", "col3", "col4", "col5"]
+
+
+def test_columns_starting_with_keywords():
+ query = "SELECT `schema_name`, full_table_name, `column_name`, `catalog_name`, `table_name`, column_length, annotation FROM corporate.all_tables"
+ assert Parser(query).columns == [
+ "schema_name",
+ "full_table_name",
+ "column_name",
+ "catalog_name",
+ "table_name",
+ "column_length",
+ "annotation",
+ ]
diff --git a/test/test_getting_tables.py b/test/test_getting_tables.py
index e53744a..9657e64 100644
--- a/test/test_getting_tables.py
+++ b/test/test_getting_tables.py
@@ -432,9 +432,15 @@ def test_get_tables_with_leading_digits():
assert ["0020_big_table"] == Parser(
"SELECT t.val as value, count(*) FROM `0020_big_table`"
).tables
+ assert ["0020_big_table"] == Parser(
+ 'SELECT t.val as value, count(*) FROM "0020_big_table" as t WHERE id BETWEEN 10 AND 20 GROUP BY val'
+ ).tables
assert ["0020_big_table"] == Parser(
"SELECT t.val as value, count(*) FROM 0020_big_table as t WHERE id BETWEEN 10 AND 20 GROUP BY val"
).tables
+ assert ["0020_big_table"] == Parser(
+ "SELECT t.val as value, count(*) FROM `0020_big_table` as t WHERE id BETWEEN 10 AND 20 GROUP BY val"
+ ).tables
assert ["0020_big_table"] == Parser(
"SELECT t.val as value, count(*) FROM 0020_big_table"
).tables
diff --git a/test/test_unions.py b/test/test_unions.py
new file mode 100644
index 0000000..f0506be
--- /dev/null
+++ b/test/test_unions.py
@@ -0,0 +1,81 @@
+from sql_metadata import Parser
+
+
+def test_union():
+ query = """
+ SELECT
+ACCOUNTING_ENTITY.VERSION as "accountingEntityVersion",
+ACCOUNTING_ENTITY.ACTIVE as "active",
+ACCOUNTING_ENTITY.CATEGORY as "category",
+ACCOUNTING_ENTITY.CREATION_DATE as "creationDate",
+ACCOUNTING_ENTITY.DESCRIPTION as "description",
+ACCOUNTING_ENTITY.ID as "accountingEntityId",
+ACCOUNTING_ENTITY.MINIMAL_REMAINDER as "minimalRemainder",
+ACCOUNTING_ENTITY.REMAINDER as "remainder",
+ACCOUNTING_ENTITY.SYSTEM_TYPE_ID as "aeSystemTypeId",
+ACCOUNTING_ENTITY.DATE_CREATION as "dateCreation",
+ACCOUNTING_ENTITY.DATE_LAST_MODIFICATION as "dateLastModification",
+ACCOUNTING_ENTITY.USER_CREATION as "userCreation",
+ACCOUNTING_ENTITY.USER_LAST_MODIFICATION as "userLastModification"
+FROM ACCOUNTING_ENTITY
+WHERE ACCOUNTING_ENTITY.ID IN (
+SELECT DPD.ACCOUNTING_ENTITY_ID AS "ACCOUNTINGENTITYID" FROM DEBT D
+INNER JOIN DUTY_PER_DEBT DPD ON DPD.DEBT_ID = D.ID
+INNER JOIN DECLARATION_V2 DV2 ON DV2.ID = D.DECLARATION_ID
+WHERE DV2.DECLARATION_REF = #MRNFORMOVEMENT
+UNION
+SELECT BX.ACCOUNTING_ENTITY_ID AS "ACCOUNTINGENTITYID" FROM BENELUX BX
+INNER JOIN DECLARATION_V2 DV2 ON DV2.ID = BX.DECLARATION_ID
+WHERE DV2.DECLARATION_REF = #MRNFORMOVEMENT
+UNION
+SELECT CA4D.ACCOUNTING_ENTITY_ID AS "ACCOUNTINGENTITYID" FROM RESERVATION R
+INNER JOIN CA4_RESERVATIONS_DECLARATION CA4D ON CA4D.ID = R.CA4_ID
+INNER JOIN DECLARATION_V2 DV2 ON DV2.ID = R.DECLARATION_ID
+WHERE DV2.DECLARATION_REF = #MRNFORMOVEMENT)
+ """
+
+ parser = Parser(query)
+ assert parser.tables == [
+ "ACCOUNTING_ENTITY",
+ "DEBT",
+ "DUTY_PER_DEBT",
+ "DECLARATION_V2",
+ "BENELUX",
+ "RESERVATION",
+ "CA4_RESERVATIONS_DECLARATION",
+ ]
+ assert parser.columns_dict == {
+ "join": [
+ "DUTY_PER_DEBT.DEBT_ID",
+ "DEBT.ID",
+ "DECLARATION_V2.ID",
+ "DEBT.DECLARATION_ID",
+ "BENELUX.DECLARATION_ID",
+ "CA4_RESERVATIONS_DECLARATION.ID",
+ "RESERVATION.CA4_ID",
+ "RESERVATION.DECLARATION_ID",
+ ],
+ "select": [
+ "ACCOUNTING_ENTITY.VERSION",
+ "ACCOUNTING_ENTITY.ACTIVE",
+ "ACCOUNTING_ENTITY.CATEGORY",
+ "ACCOUNTING_ENTITY.CREATION_DATE",
+ "ACCOUNTING_ENTITY.DESCRIPTION",
+ "ACCOUNTING_ENTITY.ID",
+ "ACCOUNTING_ENTITY.MINIMAL_REMAINDER",
+ "ACCOUNTING_ENTITY.REMAINDER",
+ "ACCOUNTING_ENTITY.SYSTEM_TYPE_ID",
+ "ACCOUNTING_ENTITY.DATE_CREATION",
+ "ACCOUNTING_ENTITY.DATE_LAST_MODIFICATION",
+ "ACCOUNTING_ENTITY.USER_CREATION",
+ "ACCOUNTING_ENTITY.USER_LAST_MODIFICATION",
+ "DUTY_PER_DEBT.ACCOUNTING_ENTITY_ID",
+ "BENELUX.ACCOUNTING_ENTITY_ID",
+ "CA4_RESERVATIONS_DECLARATION.ACCOUNTING_ENTITY_ID",
+ ],
+ "where": [
+ "ACCOUNTING_ENTITY.ID",
+ "DECLARATION_V2.DECLARATION_REF",
+ "#MRNFORMOVEMENT",
+ ],
+ }
|
Tables with leading digits are not properly recognized
https://github.com/macbre/index-digest/runs/2567720826
```
ERROR indexdigest.linters.linter_0006_not_used_columns_and_tables:linter_0006_not_used_columns_and_tables.py:91 Unable to extract tables and columns used from the query: SELECT test
ERROR indexdigest.linters.linter_0006_not_used_columns_and_tables:linter_0006_not_used_columns_and_tables.py:91 Unable to extract tables and columns used from the query: SELECT t.val as value, count(*) FROM 0020_big_table as t WHERE id BETWEEN 10 AND 20 GROUP BY val
```
```python
Parser("SELECT t.val as value, count(*) FROM 0020_big_table as t WHERE id BETWEEN 10 AND 20 GROUP BY val").tables # []
```
However:
```python
Parser("SELECT t.val as value, count(*) FROM `0020_big_table` as t WHERE id BETWEEN 10 AND 20 GROUP BY val").tables # ['0020_big_table']
```
> **Identifiers may begin with a digit** but unless quoted may not consist solely of digits.
>
> https://dev.mysql.com/doc/refman/8.0/en/identifiers.html
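A quick usage check of the double-quote variant, mirroring the added test case:
```python
from sql_metadata import Parser

# double quotes are normalized during preprocessing, so ANSI-quoted
# names resolve the same way as backtick-quoted ones
assert Parser(
    'SELECT t.val as value, count(*) FROM "0020_big_table" as t'
    " WHERE id BETWEEN 10 AND 20 GROUP BY val"
).tables == ["0020_big_table"]
```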
|
0.0
|
e8b2a73da290e893fcef268c1c6baaef811f5479
|
[
"test/test_getting_tables.py::test_get_tables_with_leading_digits"
] |
[
"test/test_getting_columns.py::test_cast_and_convert_functions",
"test/test_getting_columns.py::test_queries_with_null_conditions",
"test/test_getting_columns.py::test_queries_with_distinct",
"test/test_getting_columns.py::test_joins",
"test/test_getting_columns.py::test_getting_columns",
"test/test_getting_columns.py::test_columns_with_order_by",
"test/test_getting_columns.py::test_update_and_replace",
"test/test_getting_columns.py::test_complex_queries_columns",
"test/test_getting_columns.py::test_columns_with_comments",
"test/test_getting_columns.py::test_columns_with_keyword_aliases",
"test/test_getting_columns.py::test_columns_and_sql_functions",
"test/test_getting_columns.py::test_columns_starting_with_keywords",
"test/test_getting_tables.py::test_simple_queries_tables",
"test/test_getting_tables.py::test_complex_query_tables",
"test/test_getting_tables.py::test_joins",
"test/test_getting_tables.py::test_quoted_names",
"test/test_getting_tables.py::test_update_and_replace",
"test/test_getting_tables.py::test_order_bys",
"test/test_getting_tables.py::test_three_part_qualified_names",
"test/test_getting_tables.py::test_insert_queries",
"test/test_getting_tables.py::test_select_aliases",
"test/test_getting_tables.py::test_table_name_with_group_by",
"test/test_getting_tables.py::test_datasets",
"test/test_getting_tables.py::test_queries_with_distinct",
"test/test_getting_tables.py::test_table_names_with_dashes",
"test/test_getting_tables.py::test_unions",
"test/test_getting_tables.py::test_with_brackets",
"test/test_getting_tables.py::test_db2_query",
"test/test_unions.py::test_union"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-05-17 18:08:38+00:00
|
mit
| 3,667 |
|
macbre__sql-metadata-164
|
diff --git a/sql_metadata/parser.py b/sql_metadata/parser.py
index 869720b..ed35226 100644
--- a/sql_metadata/parser.py
+++ b/sql_metadata/parser.py
@@ -1,6 +1,7 @@
"""
This module provides SQL query parsing functions
"""
+import logging
import re
from typing import Dict, List, Optional, Tuple, Union
@@ -29,6 +30,8 @@ class Parser: # pylint: disable=R0902
"""
def __init__(self, sql: str = "") -> None:
+ self._logger = logging.getLogger(self.__class__.__name__)
+
self._raw_query = sql
self._query = self._preprocess_query()
self._query_type = None
@@ -85,12 +88,22 @@ class Parser: # pylint: disable=R0902
return self._query_type
if not self._tokens:
_ = self.tokens
- if self._tokens[0].normalized in ["CREATE", "ALTER"]:
- switch = self._tokens[0].normalized + self._tokens[1].normalized
+
+ # remove comment tokens to not confuse the logic below (see #163)
+ tokens: List[SQLToken] = list(
+ filter(lambda token: not token.is_comment, self._tokens or [])
+ )
+
+ if not tokens:
+ raise ValueError("Empty queries are not supported!")
+
+ if tokens[0].normalized in ["CREATE", "ALTER"]:
+ switch = tokens[0].normalized + tokens[1].normalized
else:
- switch = self._tokens[0].normalized
+ switch = tokens[0].normalized
self._query_type = SUPPORTED_QUERY_TYPES.get(switch, "UNSUPPORTED")
if self._query_type == "UNSUPPORTED":
+ self._logger.error("Not supported query type: %s", self._raw_query)
raise ValueError("Not supported query type!")
return self._query_type
|
macbre/sql-metadata
|
fd54bcb7b29bddb097e24cbe578415fb5a79cf95
|
diff --git a/test/test_getting_tables.py b/test/test_getting_tables.py
index 24ca3c4..bdf40c9 100644
--- a/test/test_getting_tables.py
+++ b/test/test_getting_tables.py
@@ -444,3 +444,15 @@ def test_get_tables_with_leading_digits():
assert ["0020_big_table"] == Parser(
"SELECT t.val as value, count(*) FROM 0020_big_table"
).tables
+
+
+def test_insert_ignore_with_comments():
+ queries = [
+ "INSERT IGNORE /* foo */ INTO bar VALUES (1, '123', '2017-01-01');",
+ "/* foo */ INSERT IGNORE INTO bar VALUES (1, '123', '2017-01-01');"
+ "-- foo\nINSERT IGNORE INTO bar VALUES (1, '123', '2017-01-01');"
+ "# foo\nINSERT IGNORE INTO bar VALUES (1, '123', '2017-01-01');",
+ ]
+
+ for query in queries:
+ assert ["bar"] == Parser(query).tables
diff --git a/test/test_query_type.py b/test/test_query_type.py
new file mode 100644
index 0000000..6658ae2
--- /dev/null
+++ b/test/test_query_type.py
@@ -0,0 +1,50 @@
+import pytest
+
+from sql_metadata import Parser
+
+
+def test_insert_query():
+ queries = [
+ "INSERT IGNORE /* foo */ INTO bar VALUES (1, '123', '2017-01-01');",
+ "/* foo */ INSERT IGNORE INTO bar VALUES (1, '123', '2017-01-01');"
+ "-- foo\nINSERT IGNORE INTO bar VALUES (1, '123', '2017-01-01');"
+ "# foo\nINSERT IGNORE INTO bar VALUES (1, '123', '2017-01-01');",
+ ]
+
+ for query in queries:
+ assert "INSERT" == Parser(query).query_type
+
+
+def test_select_query():
+ queries = [
+ "SELECT /* foo */ foo FROM bar",
+ "/* foo */ SELECT foo FROM bar"
+ "-- foo\nSELECT foo FROM bar"
+ "# foo\nSELECT foo FROM bar",
+ ]
+
+ for query in queries:
+ assert "SELECT" == Parser(query).query_type
+
+
+def test_unsupported_query():
+ queries = [
+ "FOO BAR",
+ "DO SOMETHING",
+ ]
+
+ for query in queries:
+ with pytest.raises(ValueError) as ex:
+ _ = Parser(query).query_type
+
+ assert "Not supported query type!" in str(ex.value)
+
+
+def test_empty_query():
+ queries = ["", "/* empty query */"]
+
+ for query in queries:
+ with pytest.raises(ValueError) as ex:
+ _ = Parser(query).query_type
+
+ assert "Empty queries are not supported!" in str(ex.value)
diff --git a/test/test_values.py b/test/test_values.py
index f63c8c3..5def787 100644
--- a/test/test_values.py
+++ b/test/test_values.py
@@ -24,8 +24,9 @@ def test_getting_values():
}
parser = Parser(
- "INSERT IGNORE INTO `0070_insert_ignore_table` VALUES (9, 2.15, '123', '2017-01-01');"
+ "/* method */ INSERT IGNORE INTO `0070_insert_ignore_table` VALUES (9, 2.15, '123', '2017-01-01');"
)
+ assert parser.query_type == "INSERT"
assert parser.values == [9, 2.15, "123", "2017-01-01"]
assert parser.values_dict == {
"column_1": 9,
|
Parser raises an exception for not supported query type when query starts with a comment
While working on [index-digest's PR](https://github.com/macbre/index-digest/pull/244), I found the following issue.
An example call:
```python
tables = get_query_tables("/* foo */ INSERT IGNORE INTO `0070_insert_ignore` VALUES (9, '123', '2017-01-01');")
```
Error raised:
```
/* foo */ INSERT IGNORE INTO `0070_insert_ignore` VALUES (9, '123', '2017-01-01');
if self._query_type == "UNSUPPORTED":
> raise ValueError("Not supported query type!")
E ValueError: Not supported query type!
env/lib/python3.9/site-packages/sql_metadata/parser.py:94: ValueError
```
Additionally, we should log the query that caused this exception to be raised.
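For reference, the behavior expected after the fix, mirroring the added tests:
```python
from sql_metadata import Parser

# comments before the first keyword no longer break query type detection
parser = Parser("/* foo */ INSERT IGNORE INTO bar VALUES (1, '123', '2017-01-01');")
assert parser.query_type == "INSERT"
assert parser.tables == ["bar"]
```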
|
0.0
|
fd54bcb7b29bddb097e24cbe578415fb5a79cf95
|
[
"test/test_getting_tables.py::test_insert_ignore_with_comments",
"test/test_query_type.py::test_insert_query",
"test/test_query_type.py::test_select_query",
"test/test_query_type.py::test_empty_query",
"test/test_values.py::test_getting_values"
] |
[
"test/test_getting_tables.py::test_simple_queries_tables",
"test/test_getting_tables.py::test_complex_query_tables",
"test/test_getting_tables.py::test_joins",
"test/test_getting_tables.py::test_quoted_names",
"test/test_getting_tables.py::test_update_and_replace",
"test/test_getting_tables.py::test_order_bys",
"test/test_getting_tables.py::test_three_part_qualified_names",
"test/test_getting_tables.py::test_insert_queries",
"test/test_getting_tables.py::test_select_aliases",
"test/test_getting_tables.py::test_table_name_with_group_by",
"test/test_getting_tables.py::test_datasets",
"test/test_getting_tables.py::test_queries_with_distinct",
"test/test_getting_tables.py::test_table_names_with_dashes",
"test/test_getting_tables.py::test_unions",
"test/test_getting_tables.py::test_with_brackets",
"test/test_getting_tables.py::test_db2_query",
"test/test_getting_tables.py::test_get_tables_with_leading_digits",
"test/test_query_type.py::test_unsupported_query"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-10 13:49:00+00:00
|
mit
| 3,668 |
|
macbre__sql-metadata-203
|
diff --git a/sql_metadata/parser.py b/sql_metadata/parser.py
index ec8a823..0d28390 100644
--- a/sql_metadata/parser.py
+++ b/sql_metadata/parser.py
@@ -66,7 +66,7 @@ class Parser: # pylint: disable=R0902
self._subquery_level = 0
self._nested_level = 0
self._parenthesis_level = 0
- self._open_parentheses = []
+ self._open_parentheses: List[SQLToken] = []
self._aliases_to_check = None
self._is_in_nested_function = False
self._is_in_with_block = False
@@ -326,10 +326,11 @@ class Parser: # pylint: disable=R0902
and not token.previous_token.is_nested_function_start
and token.is_alias_definition
):
- if token.previous_token.normalized == "AS":
- token_check = token.get_nth_previous(2)
- else:
- token_check = token.previous_token
+ token_check = (
+ token.get_nth_previous(2)
+ if token.previous_token.normalized == "AS"
+ else token.previous_token
+ )
if token_check.is_column_definition_end:
# nested subquery like select a, (select a as b from x) as column
start_token = token.find_nearest_token(
@@ -348,6 +349,13 @@ class Parser: # pylint: disable=R0902
alias_of = self._find_all_columns_between_tokens(
start_token=start_token, end_token=token
)
+ elif token_check.is_partition_clause_end:
+ start_token = token.find_nearest_token(
+ True, value_attribute="is_partition_clause_start"
+ )
+ alias_of = self._find_all_columns_between_tokens(
+ start_token=start_token, end_token=token
+ )
elif token.is_in_with_columns:
# columns definition is to the right in subquery
# we are in: with with_name (<aliases>) as (subquery)
@@ -916,6 +924,8 @@ class Parser: # pylint: disable=R0902
or token.get_nth_previous(4).normalized == "TABLE"
):
token.is_create_table_columns_declaration_start = True
+ elif token.previous_token.normalized == "OVER":
+ token.is_partition_clause_start = True
else:
# nested function
token.is_nested_function_start = True
@@ -938,6 +948,8 @@ class Parser: # pylint: disable=R0902
token.is_with_query_end = True
elif last_open_parenthesis.is_create_table_columns_declaration_start:
token.is_create_table_columns_declaration_end = True
+ elif last_open_parenthesis.is_partition_clause_start:
+ token.is_partition_clause_end = True
else:
token.is_nested_function_end = True
self._nested_level -= 1
@@ -1010,12 +1022,15 @@ class Parser: # pylint: disable=R0902
return query
- @staticmethod
- def _determine_last_relevant_keyword(token: SQLToken, last_keyword: str):
+ def _determine_last_relevant_keyword(self, token: SQLToken, last_keyword: str):
if token.is_keyword and "".join(token.normalized.split()) in RELEVANT_KEYWORDS:
if not (
token.normalized == "FROM"
and token.get_nth_previous(3).normalized == "EXTRACT"
+ ) and not (
+ token.normalized == "ORDERBY"
+ and len(self._open_parentheses) > 0
+ and self._open_parentheses[-1].is_partition_clause_start
):
last_keyword = token.normalized
return last_keyword
diff --git a/sql_metadata/token.py b/sql_metadata/token.py
index 01ee236..bb1eb37 100644
--- a/sql_metadata/token.py
+++ b/sql_metadata/token.py
@@ -80,6 +80,8 @@ class SQLToken: # pylint: disable=R0902
self.is_column_definition_end = False
self.is_create_table_columns_declaration_start = False
self.is_create_table_columns_declaration_end = False
+ self.is_partition_clause_start = False
+ self.is_partition_clause_end = False
def __str__(self):
"""
@@ -182,6 +184,7 @@ class SQLToken: # pylint: disable=R0902
and (
self.last_keyword_normalized == "SELECT"
or self.previous_token.is_column_definition_end
+ or self.previous_token.is_partition_clause_end
)
and not self.previous_token.is_comment
)
|
macbre/sql-metadata
|
ccacebfed43b89f298906f8eda7e646cdcab84fd
|
diff --git a/test/test_mssql_server.py b/test/test_mssql_server.py
index a565ab1..6c5e24c 100644
--- a/test/test_mssql_server.py
+++ b/test/test_mssql_server.py
@@ -17,56 +17,50 @@ def test_sql_server_cte():
*
FROM x
JOIN y ON x.a = y.a
- """
+ """
)
assert parser.tables == ["n", "y"]
assert parser.with_names == ["x"]
assert parser.with_queries == {"x": "SELECT * FROM n"}
assert parser.columns == ["*", "a", "y.a"]
- assert (
- Parser(
- """
- WITH x AS (
- SELECT * FROM n
- )
- SELECT
- *
- FROM x
- JOIN y ON x.a = y.a
- """
- ).tables
- == ["n", "y"]
+ parser = Parser(
+ """
+ WITH x AS (
+ SELECT * FROM n
+ )
+ SELECT
+ *
+ FROM x
+ JOIN y ON x.a = y.a
+ """
)
+ assert parser.tables == ["n", "y"]
- assert (
- Parser(
- """
- WITH foo AS (
- SELECT * FROM n
- )
- UPDATE z from foo set z.q = foo.y
- """
- ).tables
- == ["n", "z"]
+ parser = Parser(
+ """
+ WITH foo AS (
+ SELECT * FROM n
+ )
+ UPDATE z from foo set z.q = foo.y
+ """
)
+ assert parser.tables == ["n", "z"]
- assert (
- Parser(
- """
- WITH foo AS (
- SELECT * FROM tab
- )
- DELETE FROM z JOIN foo ON z.a = foo.a
- """.strip()
- ).tables
- == ["tab", "z"]
+ parser = Parser(
+ """
+ WITH foo AS (
+ SELECT * FROM tab
+ )
+ DELETE FROM z JOIN foo ON z.a = foo.a
+ """
)
+ assert parser.tables == ["tab", "z"]
def test_sql_server_cte_sales_by_year():
sales_query = """
-WITH cte_sales AS (
+ WITH cte_sales AS (
SELECT
staff_id,
COUNT(*) order_count
@@ -76,12 +70,12 @@ WITH cte_sales AS (
YEAR(order_date) = 2018
GROUP BY
staff_id
-)
-SELECT
+ )
+ SELECT
AVG(order_count) average_orders_by_staff
-FROM
+ FROM
cte_sales;
- """.strip()
+ """
parser = Parser(sales_query)
assert parser.tables == ["sales.orders"]
@@ -104,3 +98,67 @@ FROM
"select": ["staff_id", "*"],
"where": ["order_date"],
}
+
+
+def test_partition_over_with_rank_and_one_order():
+ """Test for #204"""
+ parser = Parser(
+ """
+ select t.RANKED, t.RANKED_two, t.test from (
+ SELECT
+ RANK() OVER (PARTITION BY col_one ORDER BY col_two) RANKED,
+ RANK() OVER (PARTITION BY col_one ORDER BY col_two) as RANKED_two,
+ col_three as test
+ FROM nice_table) as t
+ where t.RANKED = 1
+ and t.RANKED_two = 2
+ order by test
+ """
+ )
+ assert parser.tables == ["nice_table"]
+ assert parser.columns_aliases_names == ["RANKED", "RANKED_two", "test"]
+ assert parser.columns_aliases == {
+ "RANKED": ["col_one", "col_two"],
+ "RANKED_two": ["col_one", "col_two"],
+ "test": "col_three",
+ }
+ assert parser.columns == ["col_one", "col_two", "col_three"]
+ assert parser.columns_dict == {
+ "order_by": ["col_three"],
+ "select": ["col_one", "col_two", "col_three"],
+ "where": ["col_one", "col_two"],
+ }
+
+
+def test_partition_over_with_row_number_and_many_orders():
+ """Test for #204"""
+ parser = Parser(
+ """
+ select t.row_no, t.row_no_two, t.test from (
+ SELECT
+ ROW_NUMBER() OVER (
+ PARTITION BY col_one
+ ORDER BY col_two, col_three, col_four) row_no,
+ ROW_NUMBER() OVER (
+ PARTITION BY col_one
+ ORDER BY col_two, col_three) as row_no_two,
+ col_three as test
+ FROM nice_table) as t
+ where t.row_no = 1
+ and t.row_no_two = 2
+ order by t.row_no
+ """
+ )
+ assert parser.tables == ["nice_table"]
+ assert parser.columns_aliases_names == ["row_no", "row_no_two", "test"]
+ assert parser.columns_aliases == {
+ "row_no": ["col_one", "col_two", "col_three", "col_four"],
+ "row_no_two": ["col_one", "col_two", "col_three"],
+ "test": "col_three",
+ }
+ assert parser.columns == ["col_one", "col_two", "col_three", "col_four"]
+ assert parser.columns_dict == {
+ "order_by": ["col_one", "col_two", "col_three", "col_four"],
+ "select": ["col_one", "col_two", "col_three", "col_four"],
+ "where": ["col_one", "col_two", "col_three", "col_four"],
+ }
|
Alias and column names from subquery are case sensitive when used as conditions in outer query
It seems that the case must match for alias/column names from a subquery when they are used as a condition in the outer query, but *only* if the subquery alias is used in the condition.
For example, this works:
```sql
select tEsT
from (select a as TeSt from nowhere) as t
where tEsT = 'asdf'
```
but this fails:
```sql
select tEsT
from (select a as TeSt from nowhere) as t
where t.tEsT = 'asdf'
```
with an error:
```error
ValueError: 'tEsT' is not in list
```
I think it can be fixed by ignoring case when checking column aliases.
In Parser.py, in `_resolve_nested_query`, we can change
```py
if column_name in subparser.columns_aliases_names:
```
to
```py
if column_name.lower() in [x.lower() for x in subparser.columns_aliases_names]:
```
or
```py
if any(filter(lambda x: x.lower() == column_name.lower(),subparser.columns_aliases_names)):
```
... not sure of a nicer way to say it.
I'll put up a pull request with a test.
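A minimal sketch of the proposed case-insensitive check (a standalone, hypothetical helper — the real change would live inside `_resolve_nested_query`):
```python
def is_alias_match(column_name, aliases):
    """Case-insensitive membership test for column aliases.

    Illustrates the fix proposed above; 'aliases' stands in for
    subparser.columns_aliases_names in the actual parser.
    """
    column = column_name.lower()
    return any(alias.lower() == column for alias in aliases)

assert is_alias_match("tEsT", ["TeSt", "other"])
```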
|
0.0
|
ccacebfed43b89f298906f8eda7e646cdcab84fd
|
[
"test/test_mssql_server.py::test_partition_over_with_rank_and_one_order",
"test/test_mssql_server.py::test_partition_over_with_row_number_and_many_orders"
] |
[
"test/test_mssql_server.py::test_sql_server_cte",
"test/test_mssql_server.py::test_sql_server_cte_sales_by_year"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-08-12 19:43:06+00:00
|
mit
| 3,669 |
|
macbre__sql-metadata-316
|
diff --git a/sql_metadata/keywords_lists.py b/sql_metadata/keywords_lists.py
index b20e12d..3414f1c 100644
--- a/sql_metadata/keywords_lists.py
+++ b/sql_metadata/keywords_lists.py
@@ -76,6 +76,7 @@ class QueryType(str, Enum):
SELECT = "SELECT"
CREATE = "CREATE TABLE"
ALTER = "ALTER TABLE"
+ DROP = "DROP TABLE"
class TokenType(str, Enum):
@@ -102,6 +103,7 @@ SUPPORTED_QUERY_TYPES = {
"WITH": QueryType.SELECT,
"CREATETABLE": QueryType.CREATE,
"ALTERTABLE": QueryType.ALTER,
+ "DROPTABLE": QueryType.DROP,
}
# all the keywords we care for - rest is ignored in assigning
diff --git a/sql_metadata/parser.py b/sql_metadata/parser.py
index 03f72d8..d4fd473 100644
--- a/sql_metadata/parser.py
+++ b/sql_metadata/parser.py
@@ -112,7 +112,7 @@ class Parser: # pylint: disable=R0902
)
.position
)
- if tokens[index].normalized in ["CREATE", "ALTER"]:
+ if tokens[index].normalized in ["CREATE", "ALTER", "DROP"]:
switch = tokens[index].normalized + tokens[index + 1].normalized
else:
switch = tokens[index].normalized
|
macbre/sql-metadata
|
fbf93ee1f748225cbb0bfab7d22b4d01b9e7a2d3
|
diff --git a/test/test_drop_table.py b/test/test_drop_table.py
new file mode 100644
index 0000000..8764020
--- /dev/null
+++ b/test/test_drop_table.py
@@ -0,0 +1,9 @@
+from sql_metadata import Parser
+from sql_metadata.keywords_lists import QueryType
+
+
+def test_drop_table():
+ parser = Parser("DROP TABLE foo")
+ assert parser.query_type == QueryType.DROP
+ assert parser.tables == ["foo"]
+ assert parser.columns == []
diff --git a/test/test_query_type.py b/test/test_query_type.py
index 15f9fae..44b8f33 100644
--- a/test/test_query_type.py
+++ b/test/test_query_type.py
@@ -38,6 +38,16 @@ def test_delete_query():
assert "DELETE" == Parser(query.format(comment)).query_type
+def test_drop_table_query():
+ queries = [
+ "{0}DROP TABLE foo;{0}",
+ ]
+
+ for query in queries:
+ for comment in ["", "/* foo */", "\n--foo\n", "\n# foo\n"]:
+ assert "DROP TABLE" == Parser(query.format(comment)).query_type
+
+
def test_unsupported_query():
queries = [
"FOO BAR",
|
Issue with table extraction for delete and drop query type
Hi,
Do you know why or when the above query types (DELETE, DROP) will be supported? Or why they are not supported yet?
Thanks.
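For reference, the patch above adds `DROP TABLE` support; the expected behavior per the new tests:
```python
from sql_metadata import Parser

parser = Parser("DROP TABLE foo")
assert parser.query_type == "DROP TABLE"  # QueryType.DROP
assert parser.tables == ["foo"]
assert parser.columns == []
```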
|
0.0
|
fbf93ee1f748225cbb0bfab7d22b4d01b9e7a2d3
|
[
"test/test_drop_table.py::test_drop_table",
"test/test_query_type.py::test_drop_table_query"
] |
[
"test/test_query_type.py::test_insert_query",
"test/test_query_type.py::test_select_query",
"test/test_query_type.py::test_delete_query",
"test/test_query_type.py::test_unsupported_query",
"test/test_query_type.py::test_empty_query",
"test/test_query_type.py::test_redundant_parentheses",
"test/test_query_type.py::test_multiple_redundant_parentheses",
"test/test_query_type.py::test_multiple_redundant_parentheses_create"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-05 20:55:02+00:00
|
mit
| 3,670 |
|
macbre__sql-metadata-387
|
diff --git a/sql_metadata/parser.py b/sql_metadata/parser.py
index ab0a8fc..349f044 100644
--- a/sql_metadata/parser.py
+++ b/sql_metadata/parser.py
@@ -382,8 +382,12 @@ class Parser: # pylint: disable=R0902
elif token.last_keyword_normalized == "OFFSET":
# OFFSET <offset>
offset = int(token.value)
- elif token.previous_token.is_punctuation:
+ elif (
+ token.previous_token.is_punctuation
+ and token.last_keyword_normalized == "LIMIT"
+ ):
# LIMIT <offset>,<limit>
+ # enter this condition only when the limit has already been parsed
offset = limit
limit = int(token.value)
|
macbre/sql-metadata
|
e5cc1c9c9345daa6977c0c4971a1164ad1d8aa5f
|
diff --git a/test/test_limit_and_offset.py b/test/test_limit_and_offset.py
index 024e935..1fd6aae 100644
--- a/test/test_limit_and_offset.py
+++ b/test/test_limit_and_offset.py
@@ -45,3 +45,10 @@ def test_comma_separated():
"WHERE cl_type = 'page' AND cl_to = 'Spotify/Song' "
"ORDER BY cl_sortkey LIMIT 927600,200"
).limit_and_offset == (200, 927600)
+
+
+def test_with_in_condition():
+ # https://github.com/macbre/sql-metadata/issues/382
+ assert Parser(
+ "SELECT count(*) FROM aa WHERE userid IN (222,333) LIMIT 50 OFFSET 1000"
+ ).limit_and_offset == (50, 1000)
|
limit_and_offset method bug?
Example query:
sqltext = Parser("SELECT count(*) FROM aa WHERE grad = 88038 AND userid in(222,333) AND type = 1 AND checkable = 'sea' LIMIT 50 OFFSET 1000").limit_and_offset
Actual result (wrong):
(333, 1000)
Expected result:
(50, 1000)
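Expected behavior after the patch above (the comma-separated `LIMIT <offset>,<limit>` branch now only fires once a `LIMIT` keyword has actually been seen), mirroring the added test:
```python
from sql_metadata import Parser

# values inside IN (...) are no longer mistaken for LIMIT <offset>,<limit>
assert Parser(
    "SELECT count(*) FROM aa WHERE userid IN (222,333) LIMIT 50 OFFSET 1000"
).limit_and_offset == (50, 1000)
```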
|
0.0
|
e5cc1c9c9345daa6977c0c4971a1164ad1d8aa5f
|
[
"test/test_limit_and_offset.py::test_with_in_condition"
] |
[
"test/test_limit_and_offset.py::test_no_limit_and_offset",
"test/test_limit_and_offset.py::test_only_limit",
"test/test_limit_and_offset.py::test_limit_and_offset",
"test/test_limit_and_offset.py::test_comma_separated"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-20 16:07:33+00:00
|
mit
| 3,671 |
|
macbre__sql-metadata-388
|
diff --git a/sql_metadata/parser.py b/sql_metadata/parser.py
index 349f044..c422846 100644
--- a/sql_metadata/parser.py
+++ b/sql_metadata/parser.py
@@ -357,6 +357,14 @@ class Parser: # pylint: disable=R0902
)
):
continue
+
+ # handle INSERT INTO ON DUPLICATE KEY UPDATE queries
+ if (
+ token.last_keyword_normalized == "UPDATE"
+ and self.query_type == "INSERT"
+ ):
+ continue
+
table_name = str(token.value.strip("`"))
token.token_type = TokenType.TABLE
tables.append(table_name)
|
macbre/sql-metadata
|
1c20ba1785c6ecff89f703eeab9e11361c368b15
|
diff --git a/test/test_getting_tables.py b/test/test_getting_tables.py
index 76c8741..d90b0ce 100644
--- a/test/test_getting_tables.py
+++ b/test/test_getting_tables.py
@@ -658,3 +658,11 @@ def test_cross_join_with_subquery():
assert parser.subqueries == {
"foobar": "SELECT * FROM bars",
}
+
+
+def test_insert_into_on_duplicate_key_ipdate():
+ assert Parser(
+ "INSERT INTO user (id, name, age)"
+ " VALUES ('user1', 'john doe', 20)"
+ " ON DUPLICATE KEY UPDATE name='john doe', age=20"
+ ).tables == ["user"]
|
Cannot correctly handle MySQL upsert statements
```sql
insert into user (id, name, age)
values ('user1', 'john doe', 20)
on duplicate key
update name='john doe', age=20;
```
The correct value of `Parser(sql).tables` is `['user']`, but it returns `['user', 'name', 'age']`.
I used `0.4.3`.
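Expected behavior after the fix, mirroring the added test:
```python
from sql_metadata import Parser

# column names after ON DUPLICATE KEY UPDATE are no longer reported as tables
assert Parser(
    "INSERT INTO user (id, name, age)"
    " VALUES ('user1', 'john doe', 20)"
    " ON DUPLICATE KEY UPDATE name='john doe', age=20"
).tables == ["user"]
```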
|
0.0
|
1c20ba1785c6ecff89f703eeab9e11361c368b15
|
[
"test/test_getting_tables.py::test_insert_into_on_duplicate_key_ipdate"
] |
[
"test/test_getting_tables.py::test_simple_queries_tables",
"test/test_getting_tables.py::test_complex_query_tables",
"test/test_getting_tables.py::test_joins",
"test/test_getting_tables.py::test_quoted_names",
"test/test_getting_tables.py::test_update_and_replace",
"test/test_getting_tables.py::test_order_bys",
"test/test_getting_tables.py::test_three_part_qualified_names",
"test/test_getting_tables.py::test_insert_queries",
"test/test_getting_tables.py::test_select_aliases",
"test/test_getting_tables.py::test_table_name_with_group_by",
"test/test_getting_tables.py::test_datasets",
"test/test_getting_tables.py::test_queries_with_distinct",
"test/test_getting_tables.py::test_table_names_with_dashes",
"test/test_getting_tables.py::test_unions",
"test/test_getting_tables.py::test_with_brackets",
"test/test_getting_tables.py::test_db2_query",
"test/test_getting_tables.py::test_get_tables_with_leading_digits",
"test/test_getting_tables.py::test_insert_ignore_with_comments",
"test/test_getting_tables.py::test_mutli_from_aliases_without_as",
"test/test_getting_tables.py::test_tables_with_aggregation",
"test/test_getting_tables.py::test_insert_with_on_conflict",
"test/test_getting_tables.py::test_insert_with_on_conflict_set_name",
"test/test_getting_tables.py::test_with_keyword_in_joins",
"test/test_getting_tables.py::test_getting_proper_tables_with_keyword_aliases",
"test/test_getting_tables.py::test_cross_join_with_subquery"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-04-20 17:35:47+00:00
|
mit
| 3,672 |
|
macisamuele__language-formatters-pre-commit-hooks-160
|
diff --git a/language_formatters_pre_commit_hooks/pretty_format_toml.py b/language_formatters_pre_commit_hooks/pretty_format_toml.py
index 6b41911..17f0309 100644
--- a/language_formatters_pre_commit_hooks/pretty_format_toml.py
+++ b/language_formatters_pre_commit_hooks/pretty_format_toml.py
@@ -20,8 +20,25 @@ def pretty_format_toml(argv: typing.Optional[typing.List[str]] = None) -> int:
dest="autofix",
help="Automatically fixes encountered not-pretty-formatted files",
)
-
+ parser.add_argument(
+ "--indent",
+ type=int,
+ default="2",
+ help="The number of spaces to be used as delimiter for indentation level (Default: %(default)s)",
+ )
parser.add_argument("filenames", nargs="*", help="Filenames to fix")
+ parser.add_argument(
+ "--trailing-commas",
+ action="store_true",
+ dest="trailing_commas",
+ help="Add trailing commas to inline arrays",
+ )
+ parser.add_argument(
+ "--no-sort",
+ action="store_true",
+ dest="no_sort",
+ help="Don't sort keys",
+ )
args = parser.parse_args(argv)
status = 0
@@ -39,11 +56,11 @@ def pretty_format_toml(argv: typing.Optional[typing.List[str]] = None) -> int:
inline=True,
block=True,
),
- sort_config=SortConfiguration(tables=True),
+ sort_config=SortConfiguration(tables=not args.no_sort, table_keys=not args.no_sort),
format_config=FormattingConfiguration(
spaces_before_inline_comment=2,
- spaces_indent_inline_array=2,
- trailing_comma_inline_array=False,
+ spaces_indent_inline_array=args.indent,
+ trailing_comma_inline_array=args.trailing_commas,
),
).sorted()
|
macisamuele/language-formatters-pre-commit-hooks
|
8ced6191809ced6507e325768a1d75fcc96cb568
|
diff --git a/test-data/pretty_format_toml/indent2-pretty-formatted.toml b/test-data/pretty_format_toml/indent2-pretty-formatted.toml
new file mode 100644
index 0000000..6963c82
--- /dev/null
+++ b/test-data/pretty_format_toml/indent2-pretty-formatted.toml
@@ -0,0 +1,4 @@
+dependencies = [
+ "numpy",
+ "scikit-learn"
+]
diff --git a/test-data/pretty_format_toml/indent4-pretty-formatted.toml b/test-data/pretty_format_toml/indent4-pretty-formatted.toml
new file mode 100644
index 0000000..dde01fe
--- /dev/null
+++ b/test-data/pretty_format_toml/indent4-pretty-formatted.toml
@@ -0,0 +1,4 @@
+dependencies = [
+ "numpy",
+ "scikit-learn"
+]
diff --git a/test-data/pretty_format_toml/no-sort-pretty-formatted.toml b/test-data/pretty_format_toml/no-sort-pretty-formatted.toml
new file mode 100644
index 0000000..f203575
--- /dev/null
+++ b/test-data/pretty_format_toml/no-sort-pretty-formatted.toml
@@ -0,0 +1,4 @@
+[project]
+name = "example"
+version = "1.0.0"
+description = "Example"
diff --git a/test-data/pretty_format_toml/sorted-pretty-formatted.toml b/test-data/pretty_format_toml/sorted-pretty-formatted.toml
new file mode 100644
index 0000000..79efec4
--- /dev/null
+++ b/test-data/pretty_format_toml/sorted-pretty-formatted.toml
@@ -0,0 +1,4 @@
+[project]
+description = "Example"
+name = "example"
+version = "1.0.0"
diff --git a/tests/pretty_format_toml_test.py b/tests/pretty_format_toml_test.py
index a20ef8f..e5f8082 100644
--- a/tests/pretty_format_toml_test.py
+++ b/tests/pretty_format_toml_test.py
@@ -30,6 +30,21 @@ def test_pretty_format_toml(filename, expected_retval):
assert pretty_format_toml([filename]) == expected_retval
+@pytest.mark.parametrize(
+ ("filename", "args", "expected_retval"),
+ (
+ ("indent2-pretty-formatted.toml", [], 0),
+ ("indent2-pretty-formatted.toml", ["--indent=4"], 1),
+ ("indent4-pretty-formatted.toml", [], 1),
+ ("indent4-pretty-formatted.toml", ["--indent=4"], 0),
+ ("no-sort-pretty-formatted.toml", ["--no-sort"], 0),
+ ("no-sort-pretty-formatted.toml", [], 1),
+ ),
+)
+def test_pretty_format_toml_custom_cli_arguments(filename, args, expected_retval):
+ assert pretty_format_toml([filename] + args) == expected_retval
+
+
def test_pretty_format_toml_autofix(tmpdir):
run_autofix_test(
tmpdir,
|
Allow trailing comma in TOML
Could trailing commas for TOML be configurable? I'd rather keep them (in fact, enforce them). Trailing commas lead to better git diffs.
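A usage sketch of the hook entry point with the flags the patch above adds (`pyproject.toml` is only an example filename):
```python
from language_formatters_pre_commit_hooks.pretty_format_toml import pretty_format_toml

# exit status 0 means the files are already pretty-formatted, 1 otherwise;
# --trailing-commas enforces trailing commas in inline arrays
status = pretty_format_toml(["--trailing-commas", "--indent=4", "pyproject.toml"])
```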
|
0.0
|
8ced6191809ced6507e325768a1d75fcc96cb568
|
[
"tests/pretty_format_toml_test.py::test_pretty_format_toml_custom_cli_arguments[indent2-pretty-formatted.toml-args1-1]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml_custom_cli_arguments[indent4-pretty-formatted.toml-args3-0]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml_custom_cli_arguments[no-sort-pretty-formatted.toml-args4-0]"
] |
[
"tests/pretty_format_toml_test.py::test_pretty_format_toml[invalid.toml-1]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml[pretty-formatted.toml-0]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml[not-pretty-formatted.toml-1]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml[not-pretty-formatted_fixed.toml-0]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml_custom_cli_arguments[indent2-pretty-formatted.toml-args0-0]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml_custom_cli_arguments[indent4-pretty-formatted.toml-args2-1]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml_custom_cli_arguments[no-sort-pretty-formatted.toml-args5-1]",
"tests/pretty_format_toml_test.py::test_pretty_format_toml_autofix"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-05 11:04:39+00:00
|
apache-2.0
| 3,673 |
|
macisamuele__language-formatters-pre-commit-hooks-29
|
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..0042f4e
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,3 @@
+# Ensure that go files are forced to have "\n" as new line, regardless of the platform.
+# More context on: https://github.com/macisamuele/language-formatters-pre-commit-hooks/pull/29
+*.go text eol=lf
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 973aabe..eab6fe5 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -15,7 +15,8 @@ jobs:
fail-fast: false
matrix:
pyversion: [2.7, 3.6, 3.7]
- runs-on: ubuntu-latest
+ os: [macos-latest, ubuntu-latest, windows-latest]
+ runs-on: ${{ matrix.os }}
steps:
- name: Checkout Repo
@@ -39,8 +40,6 @@ jobs:
uses: actions/setup-go@v2
with:
go-version: 1.15.2
- - name: Create cache directory as pre-commit would do
- run: mkdir -p ${HOME}/.cache/pre-commit
- name: Install Python dependencies
run: pip install codecov tox tox-gh-actions
- name: Run Tox
@@ -83,8 +82,6 @@ jobs:
uses: actions/setup-go@v2
with:
go-version: 1.15.2
- - name: Create cache directory as pre-commit would do
- run: mkdir -p ${HOME}/.cache/pre-commit
- name: Install Python dependencies
run: pip install codecov tox
- name: Run Tox
diff --git a/language_formatters_pre_commit_hooks/pretty_format_golang.py b/language_formatters_pre_commit_hooks/pretty_format_golang.py
index 0b52011..bf4f1d9 100644
--- a/language_formatters_pre_commit_hooks/pretty_format_golang.py
+++ b/language_formatters_pre_commit_hooks/pretty_format_golang.py
@@ -10,6 +10,29 @@ from language_formatters_pre_commit_hooks.pre_conditions import golang_required
from language_formatters_pre_commit_hooks.utils import run_command
+def _get_eol_attribute():
+ """
+ Retrieve eol attribute defined for golang files
+ The method will return None in case of any error interacting with git
+ """
+ status_code, output = run_command('git check-attr -z eol -- filename.go')
+ if status_code != 0:
+ return None
+
+ try:
+ # Expected output: "filename.go\0eol\0lf\0"
+ _, _, eol, _ = output.split('\0')
+ return eol
+ except: # noqa: E722 (allow usage of bare 'except')
+ print(
+ '`git check-attr` output is not consistent to `<filename>\0<key>\0<value>\0` format: {output}'.format(
+ output=output,
+ ),
+ file=sys.stderr,
+ )
+ return None
+
+
@golang_required
def pretty_format_golang(argv=None):
parser = argparse.ArgumentParser()
@@ -43,6 +66,15 @@ def pretty_format_golang(argv=None):
', '.join(output.splitlines()),
),
)
+ if sys.platform == 'win32': # pragma: no cover
+ eol_attribute = _get_eol_attribute()
+ if eol_attribute and eol_attribute != 'lf':
+ print(
+ 'Hint: gofmt uses LF (aka `\\n`) as new line, but on Windows the default new line is CRLF (aka `\\r\\n`). '
+ 'You might want to ensure that go files are forced to use LF via `.gitattributes`. '
+ 'Example: https://github.com/macisamuele/language-formatters-pre-commit-hooks/commit/53f27fda02ead5b1b9b6a9bbd9c36bb66d229887', # noqa: E501
+ file=sys.stderr,
+ )
return status
diff --git a/language_formatters_pre_commit_hooks/utils.py b/language_formatters_pre_commit_hooks/utils.py
index d54928a..e9d1a84 100644
--- a/language_formatters_pre_commit_hooks/utils.py
+++ b/language_formatters_pre_commit_hooks/utils.py
@@ -31,9 +31,11 @@ def run_command(command):
def _base_directory():
# Extracted from pre-commit code:
# https://github.com/pre-commit/pre-commit/blob/master/pre_commit/store.py
- return os.environ.get('PRE_COMMIT_HOME') or os.path.join(
- os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
- 'pre-commit',
+ return os.path.realpath(
+ os.environ.get('PRE_COMMIT_HOME') or os.path.join(
+ os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
+ 'pre-commit',
+ ),
)
@@ -55,16 +57,18 @@ def download_url(url, file_name=None):
# via `pre-commit` as it would ensure that the directories
# are present
print('Unexisting base directory ({base_directory}). Creating it'.format(base_directory=base_directory), file=sys.stderr)
- os.mkdir(base_directory)
+ os.makedirs(base_directory)
print("Downloading {url}".format(url=url), file=sys.stderr)
r = requests.get(url, stream=True)
r.raise_for_status()
with tempfile.NamedTemporaryFile(delete=False) as tmp_file: # Not delete because we're renaming it
+ tmp_file_name = tmp_file.name
shutil.copyfileobj(r.raw, tmp_file)
tmp_file.flush()
os.fsync(tmp_file.fileno())
- os.rename(tmp_file.name, final_file)
+
+ os.rename(tmp_file_name, final_file)
return final_file
|
macisamuele/language-formatters-pre-commit-hooks
|
d61557624229ea8d012c541b2690585f210e57cb
|
diff --git a/tests/pretty_format_golang_test.py b/tests/pretty_format_golang_test.py
index f910b23..5875b3c 100644
--- a/tests/pretty_format_golang_test.py
+++ b/tests/pretty_format_golang_test.py
@@ -6,7 +6,9 @@ from __future__ import unicode_literals
import shutil
import pytest
+from mock import patch
+from language_formatters_pre_commit_hooks.pretty_format_golang import _get_eol_attribute
from language_formatters_pre_commit_hooks.pretty_format_golang import pretty_format_golang
from tests.conftest import change_dir_context
from tests.conftest import undecorate_function
@@ -46,3 +48,17 @@ def test_pretty_format_golang_autofix(tmpdir, undecorate_method):
# file was formatted (shouldn't trigger linter again)
ret = undecorate_method([srcfile.strpath])
assert ret == 0
+
+
+@pytest.mark.parametrize(
+ 'exit_status, output, expected_eol',
+ [
+ (1, '', None),
+ (0, '', None),
+ (0, 'a\0eol\0lf\0', 'lf'),
+ ],
+)
+@patch('language_formatters_pre_commit_hooks.pretty_format_golang.run_command', autospec=True)
+def test__get_eol_attribute(mock_run_command, exit_status, output, expected_eol):
+ mock_run_command.return_value = (exit_status, output)
+ assert _get_eol_attribute() == expected_eol
diff --git a/tests/utils_test.py b/tests/utils_test.py
index 9924209..f47593c 100644
--- a/tests/utils_test.py
+++ b/tests/utils_test.py
@@ -4,6 +4,7 @@ from __future__ import print_function
from __future__ import unicode_literals
import os
+import sys
from os.path import basename
import mock
@@ -18,8 +19,15 @@ from language_formatters_pre_commit_hooks.utils import run_command
@pytest.mark.parametrize(
'command, expected_status, expected_output',
[
- ['echo "1"', 0, '1\n'],
- ['echo "1" | grep 0', 1, ''],
+ ('echo 1', 0, '1{}'.format(os.linesep)),
+ pytest.param(
+ 'echo 1 | grep 0', 1, '',
+ marks=pytest.mark.skipif(condition=sys.platform == 'win32', reason='Windows does not have `grep`'),
+ ),
+ pytest.param(
+ 'echo 1 | findstr 0', 1, '',
+ marks=pytest.mark.skipif(condition=sys.platform != 'win32', reason='Linux and MacOS does not have `findstr`'),
+ ),
['true', 0, ''],
['false', 1, ''],
],
|
On Windows, cannot download the Google format jar
Error:
**PermissionError: [WinError 32] The process cannot access the file because it is being used by another process.**
This means the file was not closed.
In utils:
```
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:  # Not delete because we're renaming it
    shutil.copyfileobj(r.raw, tmp_file)
    tmp_file.flush()
    os.fsync(tmp_file.fileno())
    tmp_file.close()  # adding this close() fixes it
    os.rename(tmp_file.name, final_file)
```
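The merged fix takes a slightly different route: it records the temp file's name and renames only after the `with` block has closed the handle. A minimal sketch of that pattern (the `download_to` wrapper is hypothetical):
```python
import os
import shutil
import tempfile

def download_to(final_file, raw_stream):
    # delete=False because we rename the file afterwards
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
        tmp_file_name = tmp_file.name
        shutil.copyfileobj(raw_stream, tmp_file)
        tmp_file.flush()
        os.fsync(tmp_file.fileno())
    # the handle is closed once the with-block exits,
    # so the rename also succeeds on Windows
    os.rename(tmp_file_name, final_file)
```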
|
0.0
|
d61557624229ea8d012c541b2690585f210e57cb
|
[
"tests/pretty_format_golang_test.py::test__get_eol_attribute[1--None]",
"tests/pretty_format_golang_test.py::test__get_eol_attribute[0-a\\x00eol\\x00lf\\x00-lf]",
"tests/pretty_format_golang_test.py::test__get_eol_attribute[0--None]",
"tests/pretty_format_golang_test.py::test_pretty_format_golang[invalid.go-1]",
"tests/utils_test.py::test_run_command[echo",
"tests/utils_test.py::test_run_command[true-0-]",
"tests/utils_test.py::test_download_url[file:///root/data/temp_dir/tmpnp1icxpg/macisamuele__language-formatters-pre-commit-hooks__0.0/tests/utils_test.py-False]",
"tests/utils_test.py::test_run_command[false-1-]",
"tests/utils_test.py::test_download_url[file:///root/data/temp_dir/tmpnp1icxpg/macisamuele__language-formatters-pre-commit-hooks__0.0/tests/utils_test.py-True]"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-20 15:51:48+00:00
|
apache-2.0
| 3,674 |
|
macisamuele__language-formatters-pre-commit-hooks-9
|
diff --git a/language_formatters_pre_commit_hooks/pretty_format_yaml.py b/language_formatters_pre_commit_hooks/pretty_format_yaml.py
index 7897715..59f491c 100644
--- a/language_formatters_pre_commit_hooks/pretty_format_yaml.py
+++ b/language_formatters_pre_commit_hooks/pretty_format_yaml.py
@@ -32,10 +32,9 @@ def _process_single_document(document, yaml):
pretty_output = StringIO()
yaml.dump(content, pretty_output)
return pretty_output.getvalue()
- elif document:
+ else:
# do not disturb primitive content (unstructured text)
return str(document)
- return None
def pretty_format_yaml(argv=None):
@@ -79,6 +78,11 @@ def pretty_format_yaml(argv=None):
separator_pattern = r'^---\s*\n'
original_docs = re.split(separator_pattern, string_content, flags=re.MULTILINE)
+ # A valid multi-document YAML file might start with the separator.
+ # In this case the first document of original_docs will be empty and should not be considered
+ if string_content.startswith('---'):
+ original_docs = original_docs[1:]
+
pretty_docs = []
try:
|
macisamuele/language-formatters-pre-commit-hooks
|
4f3dedcaf25199880fe10d58544317668536c18b
|
diff --git a/test-data/pretty_format_yaml/empty-doc-with-separator.yaml b/test-data/pretty_format_yaml/empty-doc-with-separator.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/test-data/pretty_format_yaml/empty-doc-with-separator.yaml
@@ -0,0 +1,1 @@
+---
diff --git a/test-data/pretty_format_yaml/empty-doc.yaml b/test-data/pretty_format_yaml/empty-doc.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/test-data/pretty_format_yaml/multi-doc-with-empty-document-inside.yaml b/test-data/pretty_format_yaml/multi-doc-with-empty-document-inside.yaml
new file mode 100644
index 0000000..89266d2
--- /dev/null
+++ b/test-data/pretty_format_yaml/multi-doc-with-empty-document-inside.yaml
@@ -0,0 +1,5 @@
+---
+a: 1
+---
+---
+b: 2
diff --git a/tests/pretty_format_yaml_test.py b/tests/pretty_format_yaml_test.py
index d0d17da..907c726 100644
--- a/tests/pretty_format_yaml_test.py
+++ b/tests/pretty_format_yaml_test.py
@@ -30,6 +30,9 @@ def change_dir():
('not-valid-file.yaml', 1),
('ansible-vault.yaml', 0),
('primitive.yaml', 0),
+ ('empty-doc-with-separator.yaml', 1),
+ ('empty-doc.yaml', 0),
+ ('multi-doc-with-empty-document-inside.yaml', 0),
),
)
def test_pretty_format_yaml(filename, expected_retval):
|
pretty-format-yaml removes empty documents
This issue has been opened as follow-up from https://github.com/macisamuele/language-formatters-pre-commit-hooks/pull/3#discussion_r420938179
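
A standalone illustration of the `re.split` behaviour the patch above compensates for (sketch, not project code):

```python
import re

separator_pattern = r'^---\s*\n'

# A multi-document file that starts with the separator yields a leading
# empty string, which must not be mistaken for an (empty) first document.
print(re.split(separator_pattern, '---\na: 1\n---\nb: 2\n', flags=re.MULTILINE))
# ['', 'a: 1\n', 'b: 2\n']
```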
|
0.0
|
4f3dedcaf25199880fe10d58544317668536c18b
|
[
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[empty-doc-with-separator.yaml-1]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[empty-doc.yaml-0]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[multi-doc-with-empty-document-inside.yaml-0]"
] |
[
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[pretty-formatted.yaml-0]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[not-pretty-formatted.yaml-1]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[multi-doc-pretty-formatted.yaml-0]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[multi-doc-not-pretty-formatted.yaml-1]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[not-valid-file.yaml-1]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[ansible-vault.yaml-0]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml[primitive.yaml-0]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml_autofix[not-pretty-formatted.yaml]",
"tests/pretty_format_yaml_test.py::test_pretty_format_yaml_autofix[multi-doc-not-pretty-formatted.yaml]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-20 18:33:23+00:00
|
apache-2.0
| 3,675 |
|
macports__upt-macports-18
|
diff --git a/upt_macports/upt_macports.py b/upt_macports/upt_macports.py
index a8bf18f..59e1fe4 100644
--- a/upt_macports/upt_macports.py
+++ b/upt_macports/upt_macports.py
@@ -32,7 +32,7 @@ class MacPortsPackage(object):
spdx2macports = json.loads(f.read())
return ' '.join([spdx2macports.get(license.spdx_identifier, 'unknown')
- for license in self.upt_pkg.licenses])
+ for license in self.upt_pkg.licenses]) or 'unknown'
def _depends(self, phase):
return self.upt_pkg.requirements.get(phase, [])
|
macports/upt-macports
|
e3905d65c8308cec8acbf38ec3b1ec1b18e8f523
|
diff --git a/upt_macports/tests/test_macports_package.py b/upt_macports/tests/test_macports_package.py
index b876f67..2b78a14 100644
--- a/upt_macports/tests/test_macports_package.py
+++ b/upt_macports/tests/test_macports_package.py
@@ -10,7 +10,7 @@ class TestMacPortsPackageLicenses(unittest.TestCase):
def test_no_licenses(self):
self.package.upt_pkg.licenses = []
- expected = ''
+ expected = 'unknown'
self.assertEqual(self.package.licenses, expected)
def test_one_license(self):
|
Update port
The `py-upt-macports` port is currently generating defunct packages, including #11, and printing an empty licence name (https://github.com/macports/macports-ports/pull/4489).
Can you please update the `Portfile`?
@rajdeepbharati
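
For reference, the empty-licence output reduces to joining an empty list; a minimal illustration of the one-line fix in the patch above:

```python
# ' '.join over zero licenses yields '', so the patch appends a fallback:
licenses = []
print(' '.join(licenses) or 'unknown')  # -> unknown
```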
|
0.0
|
e3905d65c8308cec8acbf38ec3b1ec1b18e8f523
|
[
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_no_licenses"
] |
[
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_bad_license",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_multiple_license",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_one_license",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageArchiveType::test_known_archive",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageArchiveType::test_no_archive",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageArchiveType::test_unknown_archive"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-29 09:20:23+00:00
|
bsd-3-clause
| 3,676 |
|
macports__upt-macports-48
|
diff --git a/upt_macports/upt_macports.py b/upt_macports/upt_macports.py
index 055b455..bee3821 100755
--- a/upt_macports/upt_macports.py
+++ b/upt_macports/upt_macports.py
@@ -16,11 +16,12 @@ class MacPortsPackage(object):
def create_package(self, upt_pkg, output):
self.upt_pkg = upt_pkg
self.logger.info(f'Creating MacPorts package for {self.upt_pkg.name}')
+ portfile_content = self._render_makefile_template()
if output is None:
- print(self._render_makefile_template())
+ print(portfile_content)
else:
self._create_output_directories(upt_pkg, output)
- self._create_portfile()
+ self._create_portfile(portfile_content)
def _create_output_directories(self, upt_pkg, output_dir):
"""Creates the directory layout required"""
@@ -34,12 +35,12 @@ class MacPortsPackage(object):
except PermissionError:
sys.exit(f'Cannot create {self.output_dir}: permission denied.')
- def _create_portfile(self):
+ def _create_portfile(self, portfile_content):
self.logger.info('Creating the Portfile')
try:
with open(os.path.join(self.output_dir, 'Portfile'), 'x',
encoding='utf-8') as f:
- f.write(self._render_makefile_template())
+ f.write(portfile_content)
except FileExistsError:
sys.exit(f'Cannot create {self.output_dir}/Portfile: already exists.') # noqa
|
macports/upt-macports
|
f3c01cfd8ea12bf8d1ba1329849311bd6719d98c
|
diff --git a/upt_macports/tests/test_macports_package.py b/upt_macports/tests/test_macports_package.py
index 814bac4..23ccd31 100644
--- a/upt_macports/tests/test_macports_package.py
+++ b/upt_macports/tests/test_macports_package.py
@@ -96,6 +96,17 @@ class TestDirectoryCreation(unittest.TestCase):
self.package._create_output_directories(self.package.upt_pkg,
'/ports/')
+ @mock.patch.object(MacPortsPackage, '_render_makefile_template',
+ side_effect=PermissionError)
+ @mock.patch.object(MacPortsPackage, '_create_output_directories')
+ @mock.patch.object(MacPortsPackage, '_create_portfile')
+ def test_render_makefile_error(self, portfile, outdir, render):
+ with self.assertRaises(PermissionError):
+ self.package.create_package(mock.Mock(), 'path')
+ render.assert_called()
+ outdir.assert_not_called()
+ portfile.assert_not_called()
+
class TestFileCreation(unittest.TestCase):
def setUp(self):
@@ -107,7 +118,7 @@ class TestFileCreation(unittest.TestCase):
def test_portfile_creation(self, m_open):
fn = 'upt_macports.upt_macports.MacPortsPackage._render_makefile_template' # noqa
with mock.patch(fn, return_value='Portfile content'):
- self.package._create_portfile()
+ self.package._create_portfile('Portfile content')
m_open.assert_called_once_with('/outdir/Portfile', 'x',
encoding='utf-8')
m_open().write.assert_called_once_with('Portfile content')
@@ -115,7 +126,7 @@ class TestFileCreation(unittest.TestCase):
@mock.patch('builtins.open', side_effect=FileExistsError)
def test_portfile_file_exists(self, m_open):
with self.assertRaises(SystemExit):
- self.package._create_portfile()
+ self.package._create_portfile('Portfile content')
class TestMacPortsPackageArchiveType(unittest.TestCase):
|
Do not create (or clean-up) the directory structure/Portfile if something fails
When testing the templates, I ran into a few situations where, while creating the package, we would stumble upon an `upt.upt.ArchiveUnavailable: No such archive could be found` error.
For example,
```
py-scitools
[INFO ] [Backend] Hello, creating the package
[INFO ] [Backend] Creating the directory structure in /tmp/upt-pypi-packaging
[INFO ] [Backend] Created /tmp/upt-pypi-packaging/python/py-scitools
[INFO ] [Backend] Creating the Portfile
Traceback (most recent call last):
File "/tmp/test-3.7/bin/upt", line 11, in <module>
load_entry_point('upt==0.9', 'console_scripts', 'upt')()
File "/tmp/test-3.7/lib/python3.7/site-packages/upt/upt.py", line 302, in main
backend.create_package(upt_pkg, output=args.output)
File "/tmp/test-3.7/lib/python3.7/site-packages/upt_macports-0.1-py3.7.egg/upt_macports/upt_macports.py", line 253, in create_package
File "/tmp/test-3.7/lib/python3.7/site-packages/upt_macports-0.1-py3.7.egg/upt_macports/upt_macports.py", line 22, in create_package
File "/tmp/test-3.7/lib/python3.7/site-packages/upt_macports-0.1-py3.7.egg/upt_macports/upt_macports.py", line 51, in _create_portfile
File "/tmp/test-3.7/lib/python3.7/site-packages/upt_macports-0.1-py3.7.egg/upt_macports/upt_macports.py", line 64, in _render_makefile_template
File "/tmp/test-3.7/lib/python3.7/site-packages/Jinja2-2.10.1-py3.7.egg/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/tmp/test-3.7/lib/python3.7/site-packages/Jinja2-2.10.1-py3.7.egg/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/tmp/test-3.7/lib/python3.7/site-packages/Jinja2-2.10.1-py3.7.egg/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/tmp/test-3.7/lib/python3.7/site-packages/Jinja2-2.10.1-py3.7.egg/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "<template>", line 1, in top-level template code
File "<template>", line 19, in top-level template code
File "<template>", line 8, in block "nameversion"
File "/tmp/test-3.7/lib/python3.7/site-packages/upt_macports-0.1-py3.7.egg/upt_macports/upt_macports.py", line 144, in _python_root_name
File "/tmp/test-3.7/lib/python3.7/site-packages/upt/upt.py", line 187, in get_archive
raise ArchiveUnavailable()
upt.upt.ArchiveUnavailable: No such archive could be found
```
afterwards we do end up with an empty Portfile in the `python/py-scitools` directory. One way around this would perhaps be to first attempt to generate a Portfile in a temporary directory and only copy it over once everything finishes correctly. Or, alternatively, once we run into such exceptions we could do some clean-up.
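
For the record, the merged patch takes a simpler route: render first, touch the filesystem only afterwards. A condensed sketch of the flow from the diff above:

```python
def create_package(self, upt_pkg, output):
    self.upt_pkg = upt_pkg
    # Render first: if the template raises (e.g. ArchiveUnavailable),
    # no directory or Portfile has been created yet.
    portfile_content = self._render_makefile_template()
    if output is None:
        print(portfile_content)
    else:
        self._create_output_directories(upt_pkg, output)
        self._create_portfile(portfile_content)
```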
|
0.0
|
f3c01cfd8ea12bf8d1ba1329849311bd6719d98c
|
[
"upt_macports/tests/test_macports_package.py::TestDirectoryCreation::test_render_makefile_error",
"upt_macports/tests/test_macports_package.py::TestFileCreation::test_portfile_creation",
"upt_macports/tests/test_macports_package.py::TestFileCreation::test_portfile_file_exists"
] |
[
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_bad_license",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_license_conversion_error",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_license_detection_failed",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_license_detection_success",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_license_not_found",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_multiple_license",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_no_licenses",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_one_license",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageLicenses::test_unknown_license",
"upt_macports/tests/test_macports_package.py::TestDirectoryCreation::test_create_directories_output",
"upt_macports/tests/test_macports_package.py::TestDirectoryCreation::test_create_directories_permission_error",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageArchiveType::test_known_archive",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageArchiveType::test_no_archive",
"upt_macports/tests/test_macports_package.py::TestMacPortsPackageArchiveType::test_unknown_archive"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-28 09:11:19+00:00
|
bsd-3-clause
| 3,677 |
|
mad-lab-fau__tpcp-31
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9a20a93..c1dfe08 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Pipelines, Optimize objects, and `Scorer` are now `Generic`. This improves typing (in particular with VsCode), but
means a little bit more typing (pun intended), when creating new Pipelines and Optimizers
(https://github.com/mad-lab-fau/tpcp/pull/29)
+- Added option for scoring function to return arbitrary additional information using the `NoAgg` wrapper
+ (https://github.com/mad-lab-fau/tpcp/pull/31)
### Changed
diff --git a/docs/modules/validate.rst b/docs/modules/validate.rst
index 3dd0a40..449f4d3 100644
--- a/docs/modules/validate.rst
+++ b/docs/modules/validate.rst
@@ -14,7 +14,8 @@ Classes
:toctree: generated/validate
:template: class_with_private.rst
- Scorer
+ Scorer
+ NoAgg
Functions
---------
diff --git a/tpcp/validate/__init__.py b/tpcp/validate/__init__.py
index c375dae..003b915 100644
--- a/tpcp/validate/__init__.py
+++ b/tpcp/validate/__init__.py
@@ -1,5 +1,5 @@
"""Module for all helper methods to evaluate algorithms."""
-from tpcp.validate._scorer import Scorer, aggregate_scores
+from tpcp.validate._scorer import NoAgg, Scorer, aggregate_scores
from tpcp.validate._validate import cross_validate
-__all__ = ["Scorer", "cross_validate", "aggregate_scores"]
+__all__ = ["Scorer", "NoAgg", "cross_validate", "aggregate_scores"]
diff --git a/tpcp/validate/_scorer.py b/tpcp/validate/_scorer.py
index d85f540..0c859b7 100644
--- a/tpcp/validate/_scorer.py
+++ b/tpcp/validate/_scorer.py
@@ -26,10 +26,13 @@ from typing_extensions import Protocol
from tpcp._dataset import Dataset, Dataset_
from tpcp._pipeline import Pipeline, Pipeline_
from tpcp._utils._score import _ERROR_SCORE_TYPE
-from tpcp.exceptions import ScorerFailed
+from tpcp.exceptions import ScorerFailed, ValidationError
+
+T = TypeVar("T")
SingleScoreType = float
-MultiScoreType = Dict[str, float]
+MultiScoreType = Dict[str, Union[float, "NoAgg"]]
+ScoreType = Union[SingleScoreType, MultiScoreType]
ScoreType_ = TypeVar("ScoreType_", SingleScoreType, MultiScoreType)
IndividualScoreType = Union[Dict[str, List[float]], List[float]]
@@ -59,6 +62,38 @@ class ScoreCallback(Protocol[Pipeline_, Dataset_, ScoreType_]):
...
+class NoAgg(Generic[T]):
+ """Wrapper to wrap one or multiple output values of a scorer to prevent aggregation of these values.
+
+ If one of the values in the return dictionary of a multi-value score function is wrapped with this class,
+ the scorer will not aggregate the value.
+ This allows passing arbitrary data from the score function through the scorer.
+ As an example, this could be some general metadata, some non-numeric scores, or an array of values (e.g. when the
+ actual score is the mean of such values).
+
+ Examples
+ --------
+ >>> def score_func(pipe, dataset):
+ ... ...
+ ... return {"score_val_1": score, "some_metadata": NoAgg(metadata)}
+ >>> my_scorer = Scorer(score_func)
+
+ """
+
+ _value: T
+
+ def __init__(self, _value: T):
+ self._value = _value
+
+ def __repr__(self):
+ """Show the represnentation of the object."""
+ return f"{self.__class__.__name__}({repr(self._value)})"
+
+ def get_value(self) -> T:
+ """Return the value wrapped by NoAgg."""
+ return self._value
+
+
class Scorer(Generic[Pipeline_, Dataset_, ScoreType_]):
"""A scorer to score multiple data points of a dataset and average the results.
@@ -173,18 +208,22 @@ class Scorer(Generic[Pipeline_, Dataset_, ScoreType_]):
ScorerTypes = Union[ScoreFunc[Pipeline_, Dataset_, ScoreType_], Scorer[Pipeline_, Dataset_, ScoreType_], None]
-def _validate_score_return_val(value: Union[SingleScoreType, MultiScoreType, float]):
+def _validate_score_return_val(value: ScoreType):
"""We expect a scorer to return either a numeric value or a dictionary of such values."""
if isinstance(value, (int, float)):
return
if isinstance(value, dict):
for v in value.values():
- if not isinstance(v, (int, float)):
+ if not isinstance(v, (int, float, NoAgg)):
break
else:
return
- raise ValueError(
- "The scoring function must return either a dictionary of numeric values or a single numeric value."
+
+ raise ValidationError(
+ "The scoring function must have one of the following return types:\n"
+ "1. dictionary of numeric values or values wrapped by `NoAgg`.\n"
+ "2. single numeric value.\n\n"
+ f"You return value was {value}"
)
@@ -283,9 +322,23 @@ def aggregate_scores(
agg_scores: Dict[str, float] = {}
# Invert the dict and calculate the mean per score:
for key in score_names:
- # If the scorer raised an error, there will only be a single value. This value will be used for all
- # scores then
- score_array = [score[key] if isinstance(score, dict) else score for score in scores]
+ key_is_no_agg = False
+ score_array = []
+ for score in scores:
+ if isinstance(score, dict):
+ score_val = score[key]
+ if isinstance(score_val, NoAgg):
+ # If one of the values is wrapped in NoAgg, we will not aggregate the values and only remove the
+ # NoAgg wrapper.
+ key_is_no_agg = True
+ score_array.append(score_val.get_value())
+ else:
+ score_array.append(score_val)
+ else:
+ # If the scorer raised an error, there will only be a single value. This value will be used for all
+ # scores then
+ score_array.append(score)
inv_scores[key] = score_array
- agg_scores[key] = agg_method(score_array)
+ if not key_is_no_agg:
+ agg_scores[key] = agg_method(score_array)
return agg_scores, inv_scores
|
mad-lab-fau/tpcp
|
7ff42555e8a30e521f9dc2198914011bb0d992f4
|
diff --git a/tests/test_pipelines/test_scorer.py b/tests/test_pipelines/test_scorer.py
index 3aae28b..2cf23ef 100644
--- a/tests/test_pipelines/test_scorer.py
+++ b/tests/test_pipelines/test_scorer.py
@@ -12,8 +12,9 @@ from tests.test_pipelines.conftest import (
dummy_multi_score_func,
dummy_single_score_func,
)
+from tpcp.exceptions import ValidationError
from tpcp.validate import Scorer
-from tpcp.validate._scorer import _passthrough_scoring, _validate_scorer
+from tpcp.validate._scorer import NoAgg, _passthrough_scoring, _validate_scorer
class TestScorerCalls:
@@ -69,6 +70,27 @@ class TestScorer:
else:
assert v == np.mean(data.groups)
+ def test_score_return_val_multi_score_no_agg(self):
+ def multi_score_func(pipeline, data_point):
+ return {"score_1": data_point.groups[0], "no_agg_score": NoAgg(str(data_point.groups))}
+
+ scorer = Scorer(multi_score_func)
+ pipe = DummyOptimizablePipeline()
+ data = DummyDataset()
+ agg, single = scorer(pipe, data, np.nan)
+ assert isinstance(single, dict)
+ for k, v in single.items():
+ assert len(v) == len(data)
+ # Our Dummy scorer, returns the groupname of the dataset as string in the no-agg case
+ if k == "no_agg_score":
+ assert all(np.array(v) == [str(d.groups) for d in data])
+ else:
+ assert all(np.array(v) == data.groups)
+ assert isinstance(agg, dict)
+ assert "score_1" in agg
+ assert "no_agg_score" not in agg
+ assert agg["score_1"] == np.mean(data.groups)
+
@pytest.mark.parametrize("err_val", (np.nan, 1))
def test_scoring_return_err_val(self, err_val):
scorer = Scorer(dummy_error_score_func)
@@ -128,15 +150,17 @@ class TestScorer:
assert str(e.value) == "Dummy Error for 0"
@pytest.mark.parametrize("error_score", ("raise", 0))
- @pytest.mark.parametrize("bad_scorer", (lambda x, y: "test", lambda x, y: {"val": "test"}))
+ @pytest.mark.parametrize(
+ "bad_scorer", (lambda x, y: "test", lambda x, y: {"val": "test"}, lambda x, y: NoAgg(None))
+ )
def test_bad_scorer(self, error_score, bad_scorer):
"""Check that we catch cases where the scoring func returns invalid values independent of the error_score val"""
scorer = Scorer(bad_scorer)
pipe = DummyOptimizablePipeline()
data = DummyDataset()
- with pytest.raises(ValueError) as e:
+ with pytest.raises(ValidationError) as e:
scorer(pipe, data, error_score)
- assert "The scoring function must return" in str(e.value)
+ assert "The scoring function must have one" in str(e.value)
def test_kwargs_passed(self):
kwargs = {"a": 3, "b": "test"}
@@ -174,6 +198,9 @@ class TestScorer:
pipe = DummyOptimizablePipeline()
scorer(pipeline=pipe, dataset=DummyDataset(), error_score=np.nan)
+ def test_no_agg_scoring(self):
+ pass
+
def _dummy_func(x):
return x
|
Allow scoring functions to return arbitrary additional information
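
A usage sketch of the `NoAgg` wrapper this introduces (score names and values are illustrative):

```python
from tpcp.validate import NoAgg, Scorer

def score_func(pipeline, datapoint):
    # Plain numeric values are aggregated as before; NoAgg-wrapped values
    # are passed through per datapoint and skipped during aggregation.
    return {"precision": 0.9, "raw_errors": NoAgg([0.1, 0.3, 0.2])}

my_scorer = Scorer(score_func)
```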
|
0.0
|
7ff42555e8a30e521f9dc2198914011bb0d992f4
|
[
"tests/test_pipelines/test_scorer.py::TestScorerCalls::test_score_func_called",
"tests/test_pipelines/test_scorer.py::TestScorer::test_score_return_val_single_score",
"tests/test_pipelines/test_scorer.py::TestScorer::test_score_return_val_multi_score",
"tests/test_pipelines/test_scorer.py::TestScorer::test_score_return_val_multi_score_no_agg",
"tests/test_pipelines/test_scorer.py::TestScorer::test_scoring_return_err_val[nan]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_scoring_return_err_val[1]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_scoring_return_err_val_multi[nan]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_scoring_return_err_val_multi[1]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_err_val_raises",
"tests/test_pipelines/test_scorer.py::TestScorer::test_bad_scorer[<lambda>0-raise]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_bad_scorer[<lambda>0-0]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_bad_scorer[<lambda>1-raise]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_bad_scorer[<lambda>1-0]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_bad_scorer[<lambda>2-raise]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_bad_scorer[<lambda>2-0]",
"tests/test_pipelines/test_scorer.py::TestScorer::test_kwargs_passed",
"tests/test_pipelines/test_scorer.py::TestScorer::test_callback_called",
"tests/test_pipelines/test_scorer.py::TestScorer::test_documented_callback_signature_valid",
"tests/test_pipelines/test_scorer.py::TestScorer::test_no_agg_scoring",
"tests/test_pipelines/test_scorer.py::TestScorerUtils::test_validate_scorer[None-expected0]",
"tests/test_pipelines/test_scorer.py::TestScorerUtils::test_validate_scorer[_dummy_func-expected1]",
"tests/test_pipelines/test_scorer.py::TestScorerUtils::test_validate_scorer[scoring2-expected2]",
"tests/test_pipelines/test_scorer.py::TestScorerUtils::test_score_not_implemented",
"tests/test_pipelines/test_scorer.py::TestScorerUtils::test_invalid_input"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-30 15:25:42+00:00
|
mit
| 3,678 |
|
madelyneriksen__dashml-10
|
diff --git a/README.md b/README.md
index 7f89aaf..56f6bb1 100644
--- a/README.md
+++ b/README.md
@@ -242,7 +242,6 @@ python bench.py # or `make bench`
DashML could not be built without these libraries:
* [lxml](https://lxml.de) for creating a _fast_ XML library.
-* [Pallets Projects](https://palletsprojects.com/) for creating MarkupSafe
## License
diff --git a/dashml/core.py b/dashml/core.py
index 3f44b5c..bc5a3ad 100644
--- a/dashml/core.py
+++ b/dashml/core.py
@@ -8,21 +8,17 @@ else:
VarArg = lambda x: t.List[x]
KwArg = lambda x: t.Dict[str, x]
-from lxml.etree import Element
+from lxml.etree import _Element as Element
import lxml.html as html
from lxml.builder import E as raw_builder
-import markupsafe
-
__all__ = ["_", "render", "unsafe_from_string"]
T = t.TypeVar("T")
-
-
-Child = t.Union[Element, str, None]
-Prop = t.Union[str, int, bool, None]
+Child = t.Union[Element, str, int, float, None]
+Prop = t.Union[str, int, float, bool, None]
class Builder:
@@ -65,23 +61,18 @@ def render(ele: Element) -> str:
Returns:
(str) Rendered utf-8 string of the element.
"""
- raw: bytes = html.tostring(ele)
- return raw.decode("utf-8")
+ return html.tostring(ele).decode("utf-8")
_ = Builder()
-@singledispatch
def safe(var: Child) -> Child:
"""Mark a value as safe."""
- return var
-
-
[email protected]
-def __safe_string(var: str) -> str:
- """Escape a string."""
- return str(markupsafe.escape(var)) # pragma: no cover
+ if isinstance(var, Element):
+ return var
+ else:
+ return str(var)
# Like `className` or `htmlFor` in React.
@@ -98,10 +89,19 @@ def swap_attributes(attrs: t.Dict[str, Prop]) -> None:
class_name and html_for.
"""
for key, value in attrs.items():
+ if isinstance(value, bool) or value is None:
+ # Convert booleans/Nonetypes into HTML5 compatible booleans.
+ if value:
+ attrs[key] = ""
+ else:
+ del attrs[key]
+ continue
if key.startswith("data_") or key.startswith("aria_"):
- attrs[key.replace("_", "-")] = attrs.pop(key)
+ attrs[key.replace("_", "-")] = str(attrs.pop(key))
elif key in RESERVED_PAIRS:
- attrs[RESERVED_PAIRS[key]] = attrs.pop(key)
+ attrs[RESERVED_PAIRS[key]] = str(attrs.pop(key))
+ else:
+ attrs[key] = str(value)
def unsafe_from_string(unsafe_string: str) -> Element:
diff --git a/dashml/stubs/lxml/etree.pyi b/dashml/stubs/lxml/etree.pyi
index cd35f86..f6b3467 100644
--- a/dashml/stubs/lxml/etree.pyi
+++ b/dashml/stubs/lxml/etree.pyi
@@ -1,3 +1,5 @@
import typing as t
-class Element: ...
+class _Element: ...
+
+def Element() -> _Element: ...
diff --git a/docs/quickstart.md b/docs/quickstart.md
index 319b6fe..be6a18f 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -8,7 +8,7 @@ Like most Python packages, DashML is available on the Python Package Index for i
pip install dashml
```
-When you install DashML, its two dependencies [MarkupSafe](https://pypi.org/project/MarkupSafe/) and [lxml](https://pypi.org/project/lxml/) will be installed as well.
+When you install DashML, its dependency [lxml](https://pypi.org/project/lxml/) will be installed as well.
## Create A Component
diff --git a/requirements.txt b/requirements.txt
index ab3bad2..ab90481 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,1 @@
lxml
-markupsafe
diff --git a/setup.py b/setup.py
index 43ca931..7b15847 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,6 @@ setuptools.setup(
packages=["dashml"],
install_requires=[
"lxml",
- "markupsafe",
],
classifiers=[
"Programming Language :: Python :: 3",
|
madelyneriksen/dashml
|
bf8b5833133d4d66e2c382007746c26c8e45fe6b
|
diff --git a/dashml/tests/test_html_escaping.py b/dashml/tests/test_html_escaping.py
index 0101ed1..8a47864 100644
--- a/dashml/tests/test_html_escaping.py
+++ b/dashml/tests/test_html_escaping.py
@@ -8,3 +8,23 @@ def test_render_script_fails():
"""Gratuitous test of the classic alert."""
value = render(_.p("<script>alert('Hello, world!')</script>"))
assert value == ("<p><script>alert('Hello, world!')</script></p>")
+
+
+def test_render_boolean():
+ """Test the rendering of boolean attributes."""
+ val = render(_.input(type="checkbox", checked=True))
+ assert val == '<input type="checkbox" checked>'
+
+ val = render(_.option("California", selected=True))
+ assert val == "<option selected>California</option>"
+
+
+def test_render_numbers():
+ val = render(_.p(8))
+ assert val == "<p>8</p>"
+
+ val = render(_.p(8.8))
+ assert val == "<p>8.8</p>"
+
+ val = render(_.div(data_number=8))
+ assert val == '<div data-number="8"></div>'
diff --git a/dashml/tests/test_html_rendering.py b/dashml/tests/test_html_rendering.py
index 91e2dbb..17ed220 100644
--- a/dashml/tests/test_html_rendering.py
+++ b/dashml/tests/test_html_rendering.py
@@ -1,6 +1,8 @@
"""Generic tests for rendering DashML components."""
+import pytest
+
from dashml import _, render
|
Rendering for non-string types.
# Overview
DashML should be able to render non-string types.
## How
* Most classes should probably be cast to a string, as a form of "least surprise".
* Boolean attributes should obey HTML5 boolean rules, which means `true` should keep the attribute and `false` should remove it.
This will be implemented in `core.swap_attributes`.
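
The intended behaviour, mirroring the added tests (a sketch of expected output, not new API):

```python
from dashml import _, render

render(_.input(type="checkbox", checked=True))   # '<input type="checkbox" checked>'
render(_.input(type="checkbox", checked=False))  # '<input type="checkbox">'
render(_.p(8.8))                                 # '<p>8.8</p>'
```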
|
0.0
|
bf8b5833133d4d66e2c382007746c26c8e45fe6b
|
[
"dashml/tests/test_html_escaping.py::test_render_script_fails",
"dashml/tests/test_html_escaping.py::test_render_boolean"
] |
[
"dashml/tests/test_html_rendering.py::test_render_simple_article"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-23 01:34:17+00:00
|
mit
| 3,679 |
|
magmax__python-inquirer-516
|
diff --git a/examples/list_hints.py b/examples/list_hints.py
index b2877ea..ce76801 100644
--- a/examples/list_hints.py
+++ b/examples/list_hints.py
@@ -3,9 +3,9 @@ from pprint import pprint
import inquirer # noqa
choices_hints = {
- "Jumbo": "The biggest one we have",
- "Large": "If you need the extra kick",
"Standard": "For your every day use",
+ "Large": "If you need the extra kick",
+ "Jumbo": "The biggest one we have",
}
questions = [
diff --git a/src/inquirer/questions.py b/src/inquirer/questions.py
index 39d8581..741d778 100644
--- a/src/inquirer/questions.py
+++ b/src/inquirer/questions.py
@@ -12,29 +12,29 @@ from inquirer.render.console._other import GLOBAL_OTHER_CHOICE
class TaggedValue:
- def __init__(self, choice):
- self.label = choice[0]
- self.tag = choice[1]
- self._hash = hash(choice)
+ def __init__(self, tag, value):
+ self.tag = tag
+ self.value = value
+ self.tuple = (tag, value)
def __str__(self):
- return self.label
+ return self.tag
def __repr__(self):
- return repr(self.tag)
+ return repr(self.value)
def __eq__(self, other):
if isinstance(other, TaggedValue):
- return other.tag == self.tag
+ return other.value == self.value
if isinstance(other, tuple):
- return other == (self.label, self.tag)
- return other == self.tag
+ return other == self.tuple
+ return other == self.value
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self) -> int:
- return self._hash
+ return hash(self.tuple)
class Question:
@@ -93,7 +93,7 @@ class Question:
@property
def choices_generator(self):
for choice in self._solve(self._choices):
- yield (TaggedValue(choice) if isinstance(choice, tuple) and len(choice) == 2 else choice)
+ yield (TaggedValue(*choice) if isinstance(choice, tuple) and len(choice) == 2 else choice)
@property
def choices(self):
|
magmax/python-inquirer
|
0b658eff177845775f5eae807fdc833754b7377e
|
diff --git a/tests/unit/test_question.py b/tests/unit/test_question.py
index 9c9bdc8..068061d 100644
--- a/tests/unit/test_question.py
+++ b/tests/unit/test_question.py
@@ -354,16 +354,16 @@ class TestPathQuestion(unittest.TestCase):
def test_tagged_value():
LABEL = "label"
- TAG = "l"
- tp = (LABEL, TAG)
- tv = questions.TaggedValue(tp)
+ VALUE = "l"
+ tp = (LABEL, VALUE)
+ tv = questions.TaggedValue(*tp)
assert (str(tv) == str(LABEL)) is True
- assert (repr(tv) == repr(TAG)) is True
+ assert (repr(tv) == repr(VALUE)) is True
assert (hash(tv) == hash(tp)) is True
assert (tv == tv) is True
assert (tv != tv) is False
assert (tv == tp) is True
- assert (tv == TAG) is True
+ assert (tv == VALUE) is True
assert (tv == "") is False
|
Tuples as list choices no longer works as expected with v3.2.2 - regression from v3.2.1
The following example prints the integer value of the selected entry with v3.2.1 but its string label with v3.2.2. I think this is a regression rather than the intended behaviour.
```python
import inquirer
entries = [
("one", 1),
("two", 2),
("three", 3)
]
questions = [
inquirer.List('device', message="test", choices=entries),
]
answers = inquirer.prompt(questions)
print(answers['device'])
```
With v3.2.1: `2`
With v3.2.2: `two`
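
The behaviour the patch restores, shown directly on `TaggedValue` (constructor arguments per the diff above):

```python
from inquirer import questions

tv = questions.TaggedValue("two", 2)
str(tv)   # 'two' -- the label displayed in the list
repr(tv)  # '2'   -- the value that ends up in answers['device']
```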
|
0.0
|
0b658eff177845775f5eae807fdc833754b7377e
|
[
"tests/unit/test_question.py::test_tagged_value"
] |
[
"tests/unit/test_question.py::BaseQuestionTests::test_base_question_type",
"tests/unit/test_question.py::BaseQuestionTests::test_default_choices_value",
"tests/unit/test_question.py::BaseQuestionTests::test_default_default_value",
"tests/unit/test_question.py::BaseQuestionTests::test_default_message_is_empty",
"tests/unit/test_question.py::BaseQuestionTests::test_factory_bad_type",
"tests/unit/test_question.py::BaseQuestionTests::test_factory_checkbox_type",
"tests/unit/test_question.py::BaseQuestionTests::test_factory_confirm_type",
"tests/unit/test_question.py::BaseQuestionTests::test_factory_list_type",
"tests/unit/test_question.py::BaseQuestionTests::test_factory_located_list_type",
"tests/unit/test_question.py::BaseQuestionTests::test_factory_password_type",
"tests/unit/test_question.py::BaseQuestionTests::test_factory_text_type",
"tests/unit/test_question.py::BaseQuestionTests::test_ignore_function_receives_answers",
"tests/unit/test_question.py::BaseQuestionTests::test_ignore_works_for_false",
"tests/unit/test_question.py::BaseQuestionTests::test_ignore_works_for_function_returning_false",
"tests/unit/test_question.py::BaseQuestionTests::test_ignore_works_for_function_returning_none",
"tests/unit/test_question.py::BaseQuestionTests::test_ignore_works_for_function_returning_true",
"tests/unit/test_question.py::BaseQuestionTests::test_ignore_works_for_true",
"tests/unit/test_question.py::BaseQuestionTests::test_load_from_dict_text_type",
"tests/unit/test_question.py::BaseQuestionTests::test_load_from_json_list",
"tests/unit/test_question.py::BaseQuestionTests::test_load_from_json_text_type",
"tests/unit/test_question.py::BaseQuestionTests::test_message_previous_answers_replacement",
"tests/unit/test_question.py::BaseQuestionTests::test_message_set",
"tests/unit/test_question.py::BaseQuestionTests::test_setting_choices_value",
"tests/unit/test_question.py::BaseQuestionTests::test_setting_default_value",
"tests/unit/test_question.py::BaseQuestionTests::test_validate_false_raises_exception",
"tests/unit/test_question.py::BaseQuestionTests::test_validate_function_raising_validation_error",
"tests/unit/test_question.py::BaseQuestionTests::test_validate_function_receives_object",
"tests/unit/test_question.py::BaseQuestionTests::test_validate_function_returning_false_raises_exception",
"tests/unit/test_question.py::BaseQuestionTests::test_validate_function_returning_true_ends_ok",
"tests/unit/test_question.py::TestConfirmQuestion::test_default_default_value_is_false_instead_of_none",
"tests/unit/test_question.py::TestPathQuestion::test_default_value_validation",
"tests/unit/test_question.py::TestPathQuestion::test_normalizing_value",
"tests/unit/test_question.py::TestPathQuestion::test_path_type_validation_existing",
"tests/unit/test_question.py::TestPathQuestion::test_path_type_validation_no_existence_check",
"tests/unit/test_question.py::TestPathQuestion::test_path_validation"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-29 13:49:08+00:00
|
mit
| 3,680 |
|
magmax__python-inquirer-526
|
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 2ea1b1f..be3d7aa 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,3 @@
-furo==2023.9.10
+furo==2024.1.29
sphinx==7.2.5
myst_parser==2.0.0
diff --git a/src/inquirer/questions.py b/src/inquirer/questions.py
index 741d778..e1fb427 100644
--- a/src/inquirer/questions.py
+++ b/src/inquirer/questions.py
@@ -60,7 +60,7 @@ class Question:
self._validate = validate
self.answers = {}
self.show_default = show_default
- self.hints = hints or {}
+ self.hints = hints
self._other = other
if self._other:
diff --git a/src/inquirer/render/console/__init__.py b/src/inquirer/render/console/__init__.py
index 7e0d84d..70ed668 100644
--- a/src/inquirer/render/console/__init__.py
+++ b/src/inquirer/render/console/__init__.py
@@ -93,7 +93,9 @@ class ConsoleRender:
def _print_hint(self, render):
msg_template = "{t.move_up}{t.clear_eol}{color}{msg}"
- hint = render.get_hint()
+ hint = ""
+ if render.question.hints is not None:
+ hint = render.get_hint()
color = self._theme.Question.mark_color
if hint:
self.print_str(
|
magmax/python-inquirer
|
a4a6615981a1716a9c11e4f775794c4712d46bf5
|
diff --git a/tests/integration/console_render/test_list.py b/tests/integration/console_render/test_list.py
index c4b2e74..7cd3dea 100644
--- a/tests/integration/console_render/test_list.py
+++ b/tests/integration/console_render/test_list.py
@@ -165,3 +165,19 @@ class ListRenderTest(unittest.TestCase, helper.BaseTestCase):
sut.render(question)
self.assertInStdout("Bar")
+
+ def test_taggedValue_with_dict(self):
+ stdin = helper.event_factory(key.DOWN, key.ENTER)
+ message = "Foo message"
+ variable = "Bar variable"
+ choices = [
+ ("aa", {"a": 1}),
+ ("bb", {"b": 2}),
+ ]
+
+ question = questions.List(variable, message, choices=choices)
+
+ sut = ConsoleRender(event_generator=stdin)
+ sut.render(question)
+
+ self.assertInStdout("bb")
|
Tuples as list choices can't be hashed when containing a dict
In 3.2.1, the following worked:
```
>>> import inquirer
>>> test = [('aa', {'a': 1}), ('bb', {'b':2})]
>>> inquirer.list_input('Which?', carousel=True, choices=test)
[?] Which?: bb
aa
> bb
{'b': 2}
```
Whereas in 3.2.2 (and 3.2.3) this doesn't:
```
>>> import inquirer
>>> test = [('aa', {'a': 1}), ('bb', {'b':2})]
>>> inquirer.list_input('Which?', carousel=True, choices=test)
[?] Which?:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "App/.venv/lib/python3.11/site-packages/inquirer/shortcuts.py", line 32, in list_input
return render.render(question)
^^^^^^^^^^^^^^^^^^^^^^^
File "App/.venv/lib/python3.11/site-packages/inquirer/render/console/__init__.py", line 38, in render
return self._event_loop(render)
^^^^^^^^^^^^^^^^^^^^^^^^
File "App/.venv/lib/python3.11/site-packages/inquirer/render/console/__init__.py", line 49, in _event_loop
self._print_hint(render)
File "App/.venv/lib/python3.11/site-packages/inquirer/render/console/__init__.py", line 96, in _print_hint
hint = render.get_hint()
^^^^^^^^^^^^^^^^^
File "App/.venv/lib/python3.11/site-packages/inquirer/render/console/_list.py", line 23, in get_hint
hint = self.question.hints[choice]
~~~~~~~~~~~~~~~~~~~^^^^^^^^
File "App/.venv/lib/python3.11/site-packages/inquirer/questions.py", line 37, in __hash__
return hash(self.tuple)
^^^^^^^^^^^^^^^^
TypeError: unhashable type: 'dict'
```
The reason is that [`TaggedValue`'s `hash`](https://github.com/magmax/python-inquirer/blob/a4a6615981a1716a9c11e4f775794c4712d46bf5/src/inquirer/questions.py#L37) function tries to hash the whole tuple that was passed. Would it be enough to hash just the string representation (i.e. `hash(self.tag)`) instead of the whole tuple? I'd say the list of choices should provide unique string descriptions for every item (although it might technically not be necessary when passing `hints=` as well?)
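
The failure is easy to reproduce in isolation, since a tuple is only hashable when all of its elements are:

```python
hash(('bb', 2))         # fine
hash(('bb', {'b': 2}))  # TypeError: unhashable type: 'dict'
```

The merged patch sidesteps the hash instead: the hint lookup is skipped entirely when no `hints` mapping was supplied, so dict-valued choices work again as long as hints are not requested.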
|
0.0
|
a4a6615981a1716a9c11e4f775794c4712d46bf5
|
[
"tests/integration/console_render/test_list.py::ListRenderTest::test_taggedValue_with_dict"
] |
[
"tests/integration/console_render/test_list.py::ListRenderTest::test_all_choices_are_shown",
"tests/integration/console_render/test_list.py::ListRenderTest::test_choose_the_first",
"tests/integration/console_render/test_list.py::ListRenderTest::test_choose_the_second",
"tests/integration/console_render/test_list.py::ListRenderTest::test_choose_with_long_choices",
"tests/integration/console_render/test_list.py::ListRenderTest::test_ctrl_c_breaks_execution",
"tests/integration/console_render/test_list.py::ListRenderTest::test_first_hint_is_shown",
"tests/integration/console_render/test_list.py::ListRenderTest::test_move_down_carousel",
"tests/integration/console_render/test_list.py::ListRenderTest::test_move_up",
"tests/integration/console_render/test_list.py::ListRenderTest::test_move_up_carousel",
"tests/integration/console_render/test_list.py::ListRenderTest::test_second_hint_is_shown"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-03 23:33:00+00:00
|
mit
| 3,681 |
|
mahmoud__boltons-203
|
diff --git a/boltons/funcutils.py b/boltons/funcutils.py
index 1a34373..17ab6d5 100644
--- a/boltons/funcutils.py
+++ b/boltons/funcutils.py
@@ -474,6 +474,7 @@ class FunctionBuilder(object):
'module': lambda: None,
'body': lambda: 'pass',
'indent': lambda: 4,
+ "annotations": dict,
'filename': lambda: 'boltons.funcutils.FunctionBuilder'}
_defaults.update(_argspec_defaults)
@@ -495,7 +496,12 @@ class FunctionBuilder(object):
# def get_argspec(self): # TODO
if _IS_PY2:
- def get_sig_str(self):
+ def get_sig_str(self, with_annotations=True):
+ """Return function signature as a string.
+
+ with_annotations is ignored on Python 2. On Python 3 signature
+ will omit annotations if it is set to False.
+ """
return inspect.formatargspec(self.args, self.varargs,
self.varkw, [])
@@ -503,14 +509,23 @@ class FunctionBuilder(object):
return inspect.formatargspec(self.args, self.varargs,
self.varkw, [])[1:-1]
else:
- def get_sig_str(self):
+ def get_sig_str(self, with_annotations=True):
+ """Return function signature as a string.
+
+ with_annotations is ignored on Python 2. On Python 3 signature
+ will omit annotations if it is set to False.
+ """
+ if with_annotations:
+ annotations = self.annotations
+ else:
+ annotations = {}
return inspect.formatargspec(self.args,
self.varargs,
self.varkw,
[],
self.kwonlyargs,
{},
- self.annotations)
+ annotations)
_KWONLY_MARKER = re.compile(r"""
\* # a star
@@ -552,6 +567,7 @@ class FunctionBuilder(object):
kwargs = {'name': func.__name__,
'doc': func.__doc__,
'module': func.__module__,
+ 'annotations': getattr(func, "__annotations__", {}),
'dict': getattr(func, '__dict__', {})}
kwargs.update(cls._argspec_to_dict(func))
@@ -590,7 +606,7 @@ class FunctionBuilder(object):
body = _indent(self.body, ' ' * self.indent)
name = self.name.replace('<', '_').replace('>', '_') # lambdas
- src = tmpl.format(name=name, sig_str=self.get_sig_str(),
+ src = tmpl.format(name=name, sig_str=self.get_sig_str(with_annotations=False),
doc=self.doc, body=body)
self._compile(src, execdict)
func = execdict[name]
@@ -600,6 +616,7 @@ class FunctionBuilder(object):
func.__defaults__ = self.defaults
if not _IS_PY2:
func.__kwdefaults__ = self.kwonlydefaults
+ func.__annotations__ = self.annotations
if with_dict:
func.__dict__.update(self.dict)
|
mahmoud/boltons
|
761a8477ba9f0372a4415c588846fd6aa1fb1521
|
diff --git a/tests/test_funcutils_fb_py3.py b/tests/test_funcutils_fb_py3.py
index 3effd7e..d0b4a79 100644
--- a/tests/test_funcutils_fb_py3.py
+++ b/tests/test_funcutils_fb_py3.py
@@ -1,5 +1,6 @@
import inspect
+from collections import defaultdict
import pytest
@@ -20,12 +21,13 @@ def pita_wrap(flag=False):
def test_wraps_py3():
@pita_wrap(flag=True)
- def annotations(a: int, b: float=1) -> "tuple":
- return a, b
+ def annotations(a: int, b: float=1, c: defaultdict=()) -> defaultdict:
+ return a, b, c
- annotations(0) == (True, "annotations", (0, 1))
- annotations.__annotations__ == {'a': int, 'b': float,
- 'return': 'tuple'}
+ assert annotations(0) == (True, "annotations", (0, 1, ()))
+ assert annotations.__annotations__ == {'a': int, 'b': float,
+ 'c': defaultdict,
+ 'return': defaultdict}
@pita_wrap(flag=False)
def kwonly_arg(a, *, b, c=2):
@@ -34,8 +36,8 @@ def test_wraps_py3():
with pytest.raises(TypeError):
kwonly_arg(0)
- kwonly_arg(0, b=1) == (False, "kwonly_arg", (0, 1, 2))
- kwonly_arg(0, b=1, c=3) == (False, "kwonly_arg", (0, 1, 3))
+ assert kwonly_arg(0, b=1) == (False, "kwonly_arg", (0, 1, 2))
+ assert kwonly_arg(0, b=1, c=3) == (False, "kwonly_arg", (0, 1, 3))
@pita_wrap(flag=True)
def kwonly_non_roundtrippable_repr(*, x=lambda y: y + 1):
|
`boltons.funcutils.wraps` breaks with Python 3 function annotations
Tested with `boltons==17.1.0`
Repro: Executing the following script fails
```python
from boltons.funcutils import wraps
from typing import Optional
def wrap_it(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
f(*args, **kwargs)
return wrapper
@wrap_it
def foo(x: Optional[int]) -> Optional[int]:
return x
```
Results in
```bash
rwilliams at rwilliams-mbp114 in ~/src/oss/boltons (master●) (venv)
$ python ../../test_wraps_with_annotations.py
Traceback (most recent call last):
File "../../test_wraps_with_annotations.py", line 11, in <module>
def foo(x: Optional[int]) -> Optional[int]:
File "../../test_wraps_with_annotations.py", line 5, in wrap_it
@wraps(f)
File "/Users/rwilliams/src/oss/boltons/boltons/funcutils.py", line 307, in wrapper_wrapper
fully_wrapped = fb.get_func(execdict, with_dict=update_dict)
File "/Users/rwilliams/src/oss/boltons/boltons/funcutils.py", line 526, in get_func
self._compile(src, execdict)
File "/Users/rwilliams/src/oss/boltons/boltons/funcutils.py", line 590, in _compile
exec(code, execdict)
File "<boltons.funcutils.FunctionBuilder-0>", line 1, in <module>
NameError: name 'Union' is not defined
```
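
In miniature, the failure mode is that the generated wrapper source embeds the annotation text verbatim, and that source is then `exec`'d in a namespace where the typing names were never imported (sketch, not library code):

```python
src = "def example(x: Union[int, None]) -> Union[int, None]: return _call(x)"
exec(src, {"_call": lambda x: x})
# NameError: name 'Union' is not defined
```

The patch therefore compiles the signature without annotations and assigns `__annotations__` on the finished function afterwards.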
|
0.0
|
761a8477ba9f0372a4415c588846fd6aa1fb1521
|
[
"tests/test_funcutils_fb_py3.py::test_wraps_py3"
] |
[
"tests/test_funcutils_fb_py3.py::test_remove_kwonly_arg",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_KWONLY_MARKER[a,",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_KWONLY_MARKER[a,*,b-True]",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_KWONLY_MARKER[*args-False]",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_KWONLY_MARKER[*args,",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_add_arg_kwonly"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-12 20:19:41+00:00
|
bsd-2-clause
| 3,682 |
|
mahmoud__boltons-205
|
diff --git a/boltons/queueutils.py b/boltons/queueutils.py
index 59d478a..e061e16 100644
--- a/boltons/queueutils.py
+++ b/boltons/queueutils.py
@@ -68,12 +68,12 @@ class BasePriorityQueue(object):
Args:
priority_key (callable): A function that takes *priority* as
- passed in by :meth:`add` and returns an integer
+ passed in by :meth:`add` and returns a real number
representing the effective priority.
"""
# negating priority means larger numbers = higher priority
- _default_priority_key = staticmethod(lambda p: -int(p or 0))
+ _default_priority_key = staticmethod(lambda p: -float(p or 0))
_backend_type = list
def __init__(self, **kw):
|
mahmoud/boltons
|
a065dc576c1dc01e9539d973dfb961591ecee427
|
diff --git a/tests/test_queueutils.py b/tests/test_queueutils.py
index 8d83671..710a0cd 100644
--- a/tests/test_queueutils.py
+++ b/tests/test_queueutils.py
@@ -5,14 +5,36 @@ from boltons.queueutils import SortedPriorityQueue, HeapPriorityQueue
def _test_priority_queue(queue_type):
pq = queue_type()
- func = lambda x: x
- pq.add(func)
- pq.remove(func)
- pq.add(func)
- pq.add(func)
+ item1 = 'a'
+ item2 = 'b'
+ item3 = 'c'
+ pq.add(item1)
+ pq.remove(item1)
+
+ # integer priorities
+ pq.add(item1, 2)
+ pq.add(item2, 9)
+ pq.add(item3, 7)
+ assert len(pq) == 3
+ assert item2 == pq.pop()
+ assert len(pq) == 2
+ assert item3 == pq.pop()
assert len(pq) == 1
- assert func == pq.pop()
+ assert item1 == pq.pop()
assert len(pq) == 0
+
+ # float priorities
+ pq.add(item1, 0.2)
+ pq.add(item2, 0.9)
+ pq.add(item3, 0.7)
+ assert len(pq) == 3
+ assert item2 == pq.pop()
+ assert len(pq) == 2
+ assert item3 == pq.pop()
+ assert len(pq) == 1
+ assert item1 == pq.pop()
+ assert len(pq) == 0
+
try:
pq.pop()
except IndexError:
|
Priority queue does not support float priorities
Through a lot of painful debugging in a larger codebase, I found that both of boltons' priority queue implementations treat a float priority `p` as if it were `int(p)`.
```
In [1]: from boltons.queueutils import PriorityQueue
In [2]: items = [('a', 0.8), ('b', 0.9), ('c', 0.6), ('d', 1.8), ('e', 1.9), ('f', 1.6)]
In [3]: q = PriorityQueue()
In [4]: for i, p in items: q.add(i, p)
In [5]: for i in range(6): print(q.pop())
d
e
f
a
b
c
```
For comparison, the depq package for double-ended priority queues exhibits the behavior I would personally expect in this situation:
```
In [6]: from depq import DEPQ
In [7]: q = DEPQ(items)
In [8]: for i in range(6): print(q.popfirst())
('e', 1.9)
('d', 1.8)
('f', 1.6)
('b', 0.9)
('a', 0.8)
('c', 0.6)
```
I believe the current behavior should be explicitly documented. It's not clear that the priorities would be treated as if they were floored neither from the current docs nor from a brief look at the source.
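
The flooring is visible directly in the old default priority key:

```python
old_key = lambda p: -int(p or 0)    # floors: 0.9 and 0.6 both map to 0
new_key = lambda p: -float(p or 0)  # preserves the fractional part

print(old_key(0.9), old_key(0.6))  # 0 0
print(new_key(0.9), new_key(0.6))  # -0.9 -0.6
```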
|
0.0
|
a065dc576c1dc01e9539d973dfb961591ecee427
|
[
"tests/test_queueutils.py::test_heap_queue",
"tests/test_queueutils.py::test_sorted_queue"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-02-13 13:21:18+00:00
|
bsd-2-clause
| 3,683 |
|
mahmoud__boltons-244
|
diff --git a/boltons/funcutils.py b/boltons/funcutils.py
index f52a047..9e67787 100644
--- a/boltons/funcutils.py
+++ b/boltons/funcutils.py
@@ -38,6 +38,11 @@ try:
except ImportError:
NO_DEFAULT = object()
+try:
+ from functools import partialmethod
+except ImportError:
+ partialmethod = None
+
_IS_PY35 = sys.version_info >= (3, 5)
if not _IS_PY35:
@@ -242,10 +247,16 @@ class InstancePartial(functools.partial):
has the same ability, but is slightly more efficient.
"""
+ if partialmethod is not None: # NB: See https://github.com/mahmoud/boltons/pull/244
+ @property
+ def _partialmethod(self):
+ return partialmethod(self.func, *self.args, **self.keywords)
+
def __get__(self, obj, obj_type):
return make_method(self, obj, obj_type)
+
class CachedInstancePartial(functools.partial):
"""The ``CachedInstancePartial`` is virtually the same as
:class:`InstancePartial`, adding support for method-usage to
@@ -256,6 +267,11 @@ class CachedInstancePartial(functools.partial):
See the :class:`InstancePartial` docstring for more details.
"""
+ if partialmethod is not None: # NB: See https://github.com/mahmoud/boltons/pull/244
+ @property
+ def _partialmethod(self):
+ return partialmethod(self.func, *self.args, **self.keywords)
+
def __get__(self, obj, obj_type):
# These assignments could've been in __init__, but there was
# no simple way to do it without breaking one of PyPy or Py3.
@@ -278,6 +294,7 @@ class CachedInstancePartial(functools.partial):
obj.__dict__[name] = ret = make_method(self, obj, obj_type)
return ret
+
partial = CachedInstancePartial
@@ -428,8 +445,45 @@ def format_nonexp_repr(obj, req_names=None, opt_names=None, opt_key=None):
def wraps(func, injected=None, expected=None, **kw):
- """Modeled after the built-in :func:`functools.wraps`, this function is
- used to make your decorator's wrapper functions reflect the
+ """Decorator factory to apply update_wrapper() to a wrapper function.
+
+ Modeled after built-in :func:`functools.wraps`. Returns a decorator
+ that invokes update_wrapper() with the decorated function as the wrapper
+ argument and the arguments to wraps() as the remaining arguments.
+ Default arguments are as for update_wrapper(). This is a convenience
+ function to simplify applying partial() to update_wrapper().
+
+ Same example as in update_wrapper's doc but with wraps:
+
+ >>> from boltons.funcutils import wraps
+ >>>
+ >>> def print_return(func):
+ ... @wraps(func)
+ ... def wrapper(*args, **kwargs):
+ ... ret = func(*args, **kwargs)
+ ... print(ret)
+ ... return ret
+ ... return wrapper
+ ...
+ >>> @print_return
+ ... def example():
+ ... '''docstring'''
+ ... return 'example return value'
+ >>>
+ >>> val = example()
+ example return value
+ >>> example.__name__
+ 'example'
+ >>> example.__doc__
+ 'docstring'
+ """
+ return partial(update_wrapper, func=func, build_from=None,
+ injected=injected, expected=expected, **kw)
+
+
+def update_wrapper(wrapper, func, injected=None, expected=None, build_from=None, **kw):
+ """Modeled after the built-in :func:`functools.update_wrapper`,
+ this function is used to make your wrapper function reflect the
wrapped function's:
* Name
@@ -437,21 +491,20 @@ def wraps(func, injected=None, expected=None, **kw):
* Module
* Signature
- The built-in :func:`functools.wraps` copies the first three, but
- does not copy the signature. This version of ``wraps`` can copy
+ The built-in :func:`functools.update_wrapper` copies the first three, but
+ does not copy the signature. This version of ``update_wrapper`` can copy
the inner function's signature exactly, allowing seamless usage
and :mod:`introspection <inspect>`. Usage is identical to the
built-in version::
- >>> from boltons.funcutils import wraps
+ >>> from boltons.funcutils import update_wrapper
>>>
>>> def print_return(func):
- ... @wraps(func)
... def wrapper(*args, **kwargs):
... ret = func(*args, **kwargs)
... print(ret)
... return ret
- ... return wrapper
+ ... return update_wrapper(wrapper, func)
...
>>> @print_return
... def example():
@@ -465,14 +518,16 @@ def wraps(func, injected=None, expected=None, **kw):
>>> example.__doc__
'docstring'
- In addition, the boltons version of wraps supports modifying the
- outer signature based on the inner signature. By passing a list of
+ In addition, the boltons version of update_wrapper supports
+ modifying the outer signature. By passing a list of
*injected* argument names, those arguments will be removed from
the outer wrapper's signature, allowing your decorator to provide
arguments that aren't passed in.
Args:
+ wrapper (function) : The callable to which the attributes of
+ *func* are to be copied.
func (function): The callable whose attributes are to be copied.
injected (list): An optional list of argument names which
should not appear in the new wrapper's signature.
@@ -480,14 +535,22 @@ def wraps(func, injected=None, expected=None, **kw):
default) pairs) representing new arguments introduced by
the wrapper (the opposite of *injected*). See
:meth:`FunctionBuilder.add_arg()` for more details.
+ build_from (function): The callable from which the new wrapper
+ is built. Defaults to *func*, unless *wrapper* is partial object
+ built from *func*, in which case it defaults to *wrapper*.
+ Useful in some specific cases where *wrapper* and *func* have the
+ same arguments but differ on which are keyword-only and positional-only.
update_dict (bool): Whether to copy other, non-standard
attributes of *func* over to the wrapper. Defaults to True.
inject_to_varkw (bool): Ignore missing arguments when a
``**kwargs``-type catch-all is present. Defaults to True.
+ hide_wrapped (bool): Remove reference to the wrapped function(s)
+ in the updated function.
+ Unlike the built-in :func:`functools.update_wrapper`, the boltons
+ version returns a copy of the function and does not modify anything in place.
For more in-depth wrapping of functions, see the
- :class:`FunctionBuilder` type, on which wraps was built.
-
+ :class:`FunctionBuilder` type, on which update_wrapper was built.
"""
if injected is None:
injected = []
@@ -506,10 +569,15 @@ def wraps(func, injected=None, expected=None, **kw):
update_dict = kw.pop('update_dict', True)
inject_to_varkw = kw.pop('inject_to_varkw', True)
+ hide_wrapped = kw.pop('hide_wrapped', False)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
- fb = FunctionBuilder.from_func(func)
+ if isinstance(wrapper, functools.partial) and func is wrapper.func:
+ build_from = build_from or wrapper
+
+ fb = FunctionBuilder.from_func(build_from or func)
+
for arg in injected:
try:
fb.remove_arg(arg)
@@ -526,14 +594,15 @@ def wraps(func, injected=None, expected=None, **kw):
else:
fb.body = 'return _call(%s)' % fb.get_invocation_str()
- def wrapper_wrapper(wrapper_func):
- execdict = dict(_call=wrapper_func, _func=func)
- fully_wrapped = fb.get_func(execdict, with_dict=update_dict)
- fully_wrapped.__wrapped__ = func # ref to the original function (#115)
+ execdict = dict(_call=wrapper, _func=func)
+ fully_wrapped = fb.get_func(execdict, with_dict=update_dict)
- return fully_wrapped
+ if hide_wrapped and hasattr(fully_wrapped, '__wrapped__'):
+ del fully_wrapped.__dict__['__wrapped__']
+ elif not hide_wrapped:
+ fully_wrapped.__wrapped__ = func # ref to the original function (#115)
- return wrapper_wrapper
+ return fully_wrapped
def _parse_wraps_expected(expected):
@@ -766,11 +835,20 @@ class FunctionBuilder(object):
if not callable(func):
raise TypeError('expected callable object, not %r' % (func,))
- kwargs = {'name': func.__name__,
- 'doc': func.__doc__,
- 'module': getattr(func, '__module__', None), # e.g., method_descriptor
- 'annotations': getattr(func, "__annotations__", {}),
- 'dict': getattr(func, '__dict__', {})}
+ if isinstance(func, functools.partial):
+ if _IS_PY2:
+ raise ValueError('Cannot build FunctionBuilder instances from partials in python 2.')
+ kwargs = {'name': func.func.__name__,
+ 'doc': func.func.__doc__,
+ 'module': getattr(func.func, '__module__', None), # e.g., method_descriptor
+ 'annotations': getattr(func.func, "__annotations__", {}),
+ 'dict': getattr(func.func, '__dict__', {})}
+ else:
+ kwargs = {'name': func.__name__,
+ 'doc': func.__doc__,
+ 'module': getattr(func, '__module__', None), # e.g., method_descriptor
+ 'annotations': getattr(func, "__annotations__", {}),
+ 'dict': getattr(func, '__dict__', {})}
kwargs.update(cls._argspec_to_dict(func))
diff --git a/boltons/socketutils.py b/boltons/socketutils.py
index 86e7187..d72e2c1 100644
--- a/boltons/socketutils.py
+++ b/boltons/socketutils.py
@@ -635,23 +635,32 @@ class NetstringSocket(object):
def setmaxsize(self, maxsize):
self.maxsize = maxsize
- self._msgsize_maxsize = len(str(maxsize)) + 1 # len(str()) == log10
+ self._msgsize_maxsize = self._calc_msgsize_maxsize(maxsize)
+
+ def _calc_msgsize_maxsize(self, maxsize):
+ return len(str(maxsize)) + 1 # len(str()) == log10
def read_ns(self, timeout=_UNSET, maxsize=_UNSET):
if timeout is _UNSET:
timeout = self.timeout
+ if maxsize is _UNSET:
+ maxsize = self.maxsize
+ msgsize_maxsize = self._msgsize_maxsize
+ else:
+ msgsize_maxsize = self._calc_msgsize_maxsize(maxsize)
+
size_prefix = self.bsock.recv_until(b':',
- timeout=self.timeout,
- maxsize=self._msgsize_maxsize)
+ timeout=timeout,
+ maxsize=msgsize_maxsize)
try:
size = int(size_prefix)
except ValueError:
raise NetstringInvalidSize('netstring message size must be valid'
' integer, not %r' % size_prefix)
- if size > self.maxsize:
- raise NetstringMessageTooLong(size, self.maxsize)
+ if size > maxsize:
+ raise NetstringMessageTooLong(size, maxsize)
payload = self.bsock.recv_size(size)
if self.bsock.recv(1) != b',':
raise NetstringProtocolError("expected trailing ',' after message")
diff --git a/boltons/strutils.py b/boltons/strutils.py
index a2ad303..094c3b5 100644
--- a/boltons/strutils.py
+++ b/boltons/strutils.py
@@ -281,14 +281,14 @@ _IRR_S2P = {'addendum': 'addenda', 'alga': 'algae', 'alumna': 'alumnae',
'ox': 'oxen', 'paralysis': 'paralyses', 'parenthesis': 'parentheses',
'person': 'people', 'phenomenon': 'phenomena', 'potato': 'potatoes',
'radius': 'radii', 'scarf': 'scarves', 'scissors': 'scissors',
- 'self': 'selves', 'series': 'series', 'sheep': 'sheep',
- 'shelf': 'shelves', 'species': 'species', 'stimulus': 'stimuli',
- 'stratum': 'strata', 'syllabus': 'syllabi', 'symposium': 'symposia',
- 'synopsis': 'synopses', 'synthesis': 'syntheses', 'tableau': 'tableaux',
- 'that': 'those', 'thesis': 'theses', 'thief': 'thieves',
- 'this': 'these', 'tomato': 'tomatoes', 'tooth': 'teeth',
- 'torpedo': 'torpedoes', 'vertebra': 'vertebrae', 'veto': 'vetoes',
- 'vita': 'vitae', 'watch': 'watches', 'wife': 'wives',
+ 'self': 'selves', 'sense': 'senses', 'series': 'series', 'sheep':
+ 'sheep', 'shelf': 'shelves', 'species': 'species', 'stimulus':
+ 'stimuli', 'stratum': 'strata', 'syllabus': 'syllabi', 'symposium':
+ 'symposia', 'synopsis': 'synopses', 'synthesis': 'syntheses',
+ 'tableau': 'tableaux', 'that': 'those', 'thesis': 'theses',
+ 'thief': 'thieves', 'this': 'these', 'tomato': 'tomatoes', 'tooth':
+ 'teeth', 'torpedo': 'torpedoes', 'vertebra': 'vertebrae', 'veto':
+ 'vetoes', 'vita': 'vitae', 'watch': 'watches', 'wife': 'wives',
'wolf': 'wolves', 'woman': 'women'}
|
mahmoud/boltons
|
9c99fd6e4a345803a43003284cd2aed8a772eb01
|
diff --git a/tests/test_funcutils.py b/tests/test_funcutils.py
index a4b6793..783399d 100644
--- a/tests/test_funcutils.py
+++ b/tests/test_funcutils.py
@@ -34,6 +34,15 @@ def test_partials():
assert g.cached_partial_greet() == 'Hello...'
assert CachedInstancePartial(g.greet, excitement='s')() == 'Hellos'
+ g.native_greet = 'native reassigned'
+ assert g.native_greet == 'native reassigned'
+
+ g.partial_greet = 'partial reassigned'
+ assert g.partial_greet == 'partial reassigned'
+
+ g.cached_partial_greet = 'cached_partial reassigned'
+ assert g.cached_partial_greet == 'cached_partial reassigned'
+
def test_copy_function():
def callee():
diff --git a/tests/test_funcutils_fb.py b/tests/test_funcutils_fb.py
index 77e5a6d..05d1582 100644
--- a/tests/test_funcutils_fb.py
+++ b/tests/test_funcutils_fb.py
@@ -1,5 +1,4 @@
import pytest
-
from boltons.funcutils import wraps, FunctionBuilder
diff --git a/tests/test_funcutils_fb_py3.py b/tests/test_funcutils_fb_py3.py
index e6af8e0..1315473 100644
--- a/tests/test_funcutils_fb_py3.py
+++ b/tests/test_funcutils_fb_py3.py
@@ -1,10 +1,16 @@
import inspect
+import functools
from collections import defaultdict
import pytest
-from boltons.funcutils import wraps, FunctionBuilder
+from boltons.funcutils import wraps, FunctionBuilder, update_wrapper
+import boltons.funcutils as funcutils
+
+
+def wrappable_varkw_func(a, b, **kw):
+ return a, b
def pita_wrap(flag=False):
@@ -47,6 +53,14 @@ def test_wraps_py3():
True, 'kwonly_non_roundtrippable_repr', 2)
[email protected]('partial_kind', (functools, funcutils))
+def test_update_wrapper_partial(partial_kind):
+ wrapper = partial_kind.partial(wrappable_varkw_func, b=1)
+
+ fully_wrapped = update_wrapper(wrapper, wrappable_varkw_func)
+ assert fully_wrapped(1) == (1, 1)
+
+
def test_remove_kwonly_arg():
# example adapted from https://github.com/mahmoud/boltons/issues/123
diff --git a/tests/test_funcutils_fb_py37.py b/tests/test_funcutils_fb_py37.py
index fc2f74b..7b10f72 100644
--- a/tests/test_funcutils_fb_py37.py
+++ b/tests/test_funcutils_fb_py37.py
@@ -5,6 +5,10 @@ import inspect
from boltons.funcutils import wraps, FunctionBuilder
+def wrappable_func(a, b):
+ return a, b
+
+
def test_wraps_async():
# from https://github.com/mahmoud/boltons/issues/194
import asyncio
@@ -49,3 +53,21 @@ def test_wraps_async():
# lol windows py37 somehow completes this in under 0.3
# "assert 0.29700000000002547 > 0.3" https://ci.appveyor.com/project/mahmoud/boltons/builds/22261051/job/3jfq1tq2233csqp6
assert duration > 0.25
+
+
+def test_wraps_hide_wrapped():
+ new_func = wraps(wrappable_func, injected='b')(lambda a: wrappable_func(a, b=1))
+ new_sig = inspect.signature(new_func, follow_wrapped=True)
+
+ assert list(new_sig.parameters.keys()) == ['a', 'b']
+
+ new_func = wraps(wrappable_func, injected='b', hide_wrapped=True)(lambda a: wrappable_func(a, b=1))
+ new_sig = inspect.signature(new_func, follow_wrapped=True)
+
+ assert list(new_sig.parameters.keys()) == ['a']
+
+ new_func = wraps(wrappable_func, injected='b')(lambda a: wrappable_func(a, b=1))
+ new_new_func = wraps(new_func, injected='a', hide_wrapped=True)(lambda: new_func(a=1))
+ new_new_sig = inspect.signature(new_new_func, follow_wrapped=True)
+
+ assert len(new_new_sig.parameters) == 0
diff --git a/tests/test_socketutils.py b/tests/test_socketutils.py
index 90d988d..dd095eb 100644
--- a/tests/test_socketutils.py
+++ b/tests/test_socketutils.py
@@ -248,9 +248,8 @@ def test_timeout_setters_getters():
def netstring_server(server_socket):
"A basic netstring server loop, supporting a few operations"
- running = True
try:
- while running:
+ while True:
clientsock, addr = server_socket.accept()
client = NetstringSocket(clientsock)
while 1:
@@ -259,8 +258,7 @@ def netstring_server(server_socket):
clientsock.close()
break
elif request == b'shutdown':
- running = False
- break
+ return
elif request == b'reply4k':
client.write_ns(b'a' * 4096)
elif request == b'ping':
@@ -272,7 +270,6 @@ def netstring_server(server_socket):
except Exception as e:
print(u'netstring_server exiting with error: %r' % e)
raise
- return
def test_socketutils_netstring():
@@ -376,3 +373,57 @@ def test_socketutils_netstring():
client.write_ns(b'shutdown')
print("all passed")
+
+
+def netstring_server_timeout_override(server_socket):
+ """Netstring socket has an unreasonably low timeout,
+ however it should be overridden by the `read_ns` argument."""
+
+ try:
+ while True:
+ clientsock, addr = server_socket.accept()
+ client = NetstringSocket(clientsock, timeout=0.01)
+ while 1:
+ request = client.read_ns(1)
+ if request == b'close':
+ clientsock.close()
+ break
+ elif request == b'shutdown':
+ return
+ elif request == b'ping':
+ client.write_ns(b'pong')
+ except Exception as e:
+ print(u'netstring_server exiting with error: %r' % e)
+ raise
+
+
+def test_socketutils_netstring_timeout():
+ """Tests that server socket timeout is overriden by the argument to read call.
+
+ The server has a timeout of 10 ms, and we will sleep for 20 ms. If the timeout is not overridden correctly,
+ a timeout exception will be raised."""
+
+ print("running timeout test")
+
+ # Set up server
+ server_socket = socket.socket()
+ server_socket.bind(('127.0.0.1', 0)) # localhost with ephemeral port
+ server_socket.listen(100)
+ ip, port = server_socket.getsockname()
+ start_server = lambda: netstring_server_timeout_override(server_socket)
+ threading.Thread(target=start_server).start()
+
+ # set up client
+ def client_connect():
+ clientsock = socket.create_connection((ip, port))
+ client = NetstringSocket(clientsock)
+ return client
+
+ # connect, ping-pong
+ client = client_connect()
+ time.sleep(0.02)
+ client.write_ns(b'ping')
+ assert client.read_ns() == b'pong'
+
+ client.write_ns(b'shutdown')
+ print("no timeout occured - all good.")
\ No newline at end of file
|
funcutils.wraps() not working with partials
Hi!
With boltons 20.0.0 (on Linux), I get errors when trying to wrap partials. My goal is to fix the docstring and signature of the wrapped partial so that it acts like a normal function, with an option to "hide" the pre-supplied arguments via "injected".
Examples:
```
from functools import partial
from boltons.funcutils import wraps
def func(a, b=1, c=1):
"""Docstring"""
return (a, b, c)
newf = wraps(func)(partial(func, b=2))
newf(1)
# Expected (1, 2, 1)
# Got : TypeError : func() got multiple values for argument 'b'
```
After inspection, I believe this is due to `partial` modifying the signature of `func` by changing `b` and all subsequent args to keyword-only, which `wraps` doesn't see because it builds the new function from `func` directly.
```
from inspect import signature
signature(partial(func, b=2)) # <Signature (a, *, b=2, c=1)>
signature(newf, follow_wrapped=True)  # <Signature (a, b=1, c=1)>
```
So, first of all: is this a bug in `boltons.funcutils.wraps`, or is it just not in the scope of what it is supposed to do?
I could use `functools.update_wrapper`, but I really wanted to "inject" the new keywords, i.e. make them disappear from the signature. Also, using `help()` on an "updated" partial doesn't work, while it does on `wraps(func)(partial(func))`.
I quickly modified `wraps` and was able to obtain the desired behaviour by passing the partial directly and using it to create the underlying `FunctionBuilder` instance. It works since the arguments are the same, just with modified properties and defaults, but it kinda doesn't make sense, since `wraps` is supposed to be a decorator. Thus the solution I see is to move the code from `wraps` to a new `update_wrapper`. Example:
```
def update_wrapper(wrapper, wrapped, build_from=None, injected=None, expected=None, **kw):
... same code as `wraps` but:
fb = FunctionBuilder.from_func(build_from or wrapped)
... and it returns "fully_wrapped"
def wraps(func, injected=None, expected=None, **kw):
def wrapper_wrapper(wrapper_func):
return update_wrapper(wrapper_func, func, injected=injected, expected=expected, **kw)
return wrapper_wrapper
```
What do you think?
(If needed or wanted, I have time to suggest a PR on this)
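For reference, here is a minimal sketch of the behaviour the diff above enables, adapted from the example in this report and the accompanying test patch (it assumes a boltons build that includes the new `update_wrapper`):
```
import functools
from boltons.funcutils import update_wrapper  # added by the diff above

def func(a, b=1, c=1):
    """Docstring"""
    return (a, b, c)

# update_wrapper detects that the wrapper is a partial of func and builds
# the new signature from the partial itself (build_from defaults to it),
# so the pre-bound 'b' stays keyword-only and no longer collides.
newf = update_wrapper(functools.partial(func, b=2), func)

assert newf(1) == (1, 2, 1)         # no more "multiple values for 'b'"
assert newf.__doc__ == 'Docstring'  # metadata is copied as before
```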
|
0.0
|
9c99fd6e4a345803a43003284cd2aed8a772eb01
|
[
"tests/test_funcutils.py::test_partials",
"tests/test_funcutils.py::test_copy_function",
"tests/test_funcutils.py::test_total_ordering",
"tests/test_funcutils.py::test_format_invocation",
"tests/test_funcutils_fb.py::test_wraps_basic",
"tests/test_funcutils_fb.py::test_wraps_injected",
"tests/test_funcutils_fb.py::test_wraps_update_dict",
"tests/test_funcutils_fb.py::test_wraps_unknown_args",
"tests/test_funcutils_fb.py::test_FunctionBuilder_invalid_args",
"tests/test_funcutils_fb.py::test_FunctionBuilder_invalid_body",
"tests/test_funcutils_fb.py::test_FunctionBuilder_modify",
"tests/test_funcutils_fb.py::test_wraps_wrappers",
"tests/test_funcutils_fb.py::test_FunctionBuilder_add_arg",
"tests/test_funcutils_fb.py::test_wraps_expected",
"tests/test_funcutils_fb.py::test_defaults_dict",
"tests/test_funcutils_fb.py::test_get_arg_names",
"tests/test_funcutils_fb.py::test_get_invocation_sig_str[args0-None-None-None-a,",
"tests/test_funcutils_fb.py::test_get_invocation_sig_str[None-args-kwargs-None-*args,",
"tests/test_funcutils_fb.py::test_get_invocation_sig_str[a-None-None-defaults2-a-(a)]",
"tests/test_funcutils_fb_py3.py::test_wraps_py3",
"tests/test_funcutils_fb_py3.py::test_update_wrapper_partial[functools]",
"tests/test_funcutils_fb_py3.py::test_update_wrapper_partial[boltons.funcutils]",
"tests/test_funcutils_fb_py3.py::test_remove_kwonly_arg",
"tests/test_funcutils_fb_py3.py::test_defaults_dict",
"tests/test_funcutils_fb_py3.py::test_get_arg_names",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_KWONLY_MARKER[a,",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_KWONLY_MARKER[a,*,b-True]",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_KWONLY_MARKER[*args-False]",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_KWONLY_MARKER[*args,",
"tests/test_funcutils_fb_py3.py::test_FunctionBuilder_add_arg_kwonly",
"tests/test_funcutils_fb_py3.py::test_get_invocation_sig_str[None-args-kwargs-None-a-kwonlydefaults0-*args,",
"tests/test_funcutils_fb_py37.py::test_wraps_async",
"tests/test_funcutils_fb_py37.py::test_wraps_hide_wrapped",
"tests/test_socketutils.py::test_short_lines",
"tests/test_socketutils.py::test_multibyte_delim",
"tests/test_socketutils.py::test_props",
"tests/test_socketutils.py::test_buffers",
"tests/test_socketutils.py::test_client_disconnecting",
"tests/test_socketutils.py::test_split_delim",
"tests/test_socketutils.py::test_basic_nonblocking",
"tests/test_socketutils.py::test_simple_buffered_socket_passthroughs",
"tests/test_socketutils.py::test_timeout_setters_getters",
"tests/test_socketutils.py::test_socketutils_netstring",
"tests/test_socketutils.py::test_socketutils_netstring_timeout"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-18 19:30:23+00:00
|
bsd-2-clause
| 3,684 |
|
mahmoud__boltons-271
|
diff --git a/boltons/dictutils.py b/boltons/dictutils.py
index ce5884c..8d1a4e7 100644
--- a/boltons/dictutils.py
+++ b/boltons/dictutils.py
@@ -1076,7 +1076,7 @@ class FrozenDict(dict):
"raises a TypeError, because FrozenDicts are immutable"
raise TypeError('%s object is immutable' % self.__class__.__name__)
- __setitem__ = __delitem__ = update = _raise_frozen_typeerror
+ __ior__ = __setitem__ = __delitem__ = update = _raise_frozen_typeerror
setdefault = pop = popitem = clear = _raise_frozen_typeerror
del _raise_frozen_typeerror
|
mahmoud/boltons
|
3dfa387edf5f20d5b6e483c7ba7260fbe9930537
|
diff --git a/tests/test_dictutils.py b/tests/test_dictutils.py
index b6873a8..6eac812 100644
--- a/tests/test_dictutils.py
+++ b/tests/test_dictutils.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
+import sys
import pytest
from boltons.dictutils import OMD, OneToOne, ManyToMany, FrozenDict, subdict, FrozenHashError
@@ -432,6 +433,15 @@ def test_frozendict():
return
[email protected](sys.version_info < (3, 9), reason="requires python3.9 or higher")
+def test_frozendict_ior():
+ data = {'a': 'A', 'b': 'B'}
+ fd = FrozenDict(data)
+
+ with pytest.raises(TypeError, match=".*FrozenDicts are immutable.*"):
+ fd |= fd
+
+
def test_frozendict_api():
# all the read-only methods that are fine
through_methods = ['__class__',
@@ -452,8 +462,10 @@ def test_frozendict_api():
'__lt__',
'__ne__',
'__new__',
+ '__or__',
'__reduce__',
'__reversed__',
+ '__ror__',
'__setattr__',
'__sizeof__',
'__str__',
|
python 3.9 test failure version 20.2.1
=================================== FAILURES ===================================
_____________________________ test_frozendict_api ______________________________
def test_frozendict_api():
# all the read-only methods that are fine
through_methods = ['__class__',
'__cmp__',
'__contains__',
'__delattr__',
'__dir__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__getitem__',
'__gt__',
'__init__',
'__iter__',
'__le__',
'__len__',
'__lt__',
'__ne__',
'__new__',
'__reduce__',
'__reversed__',
'__setattr__',
'__sizeof__',
'__str__',
'copy',
'get',
'has_key',
'items',
'iteritems',
'iterkeys',
'itervalues',
'keys',
'values',
'viewitems',
'viewkeys',
'viewvalues']
fd = FrozenDict()
ret = []
for attrname in dir(fd):
if attrname == '_hash': # in the dir, even before it's set
continue
attr = getattr(fd, attrname)
if not callable(attr):
continue
if getattr(FrozenDict, attrname) == getattr(dict, attrname, None) and attrname not in through_methods:
> assert attrname == False
E AssertionError: assert '__ior__' == False
tests/test_dictutils.py:483: AssertionError
=========================== short test summary info ============================
FAILED tests/test_dictutils.py::test_frozendict_api - AssertionError: assert .
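For reference, a minimal sketch of the patched behaviour on Python 3.9+, where PEP 584 introduced the dict union operators; the one-line fix routes `__ior__` through the same `_raise_frozen_typeerror` as the other mutating methods:
```
from boltons.dictutils import FrozenDict

fd = FrozenDict({'a': 'A', 'b': 'B'})

# The read-only union stays allowed; dict.__or__ returns a new plain dict.
assert fd | {'c': 'C'} == {'a': 'A', 'b': 'B', 'c': 'C'}

# The in-place union would mutate, so the patched __ior__ raises instead.
try:
    fd |= {'c': 'C'}
except TypeError as exc:
    print(exc)  # FrozenDict object is immutable
```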
|
0.0
|
3dfa387edf5f20d5b6e483c7ba7260fbe9930537
|
[
"tests/test_dictutils.py::test_frozendict_api"
] |
[
"tests/test_dictutils.py::test_dict_init",
"tests/test_dictutils.py::test_todict",
"tests/test_dictutils.py::test_eq",
"tests/test_dictutils.py::test_copy",
"tests/test_dictutils.py::test_clear",
"tests/test_dictutils.py::test_types",
"tests/test_dictutils.py::test_multi_correctness",
"tests/test_dictutils.py::test_kv_consistency",
"tests/test_dictutils.py::test_update_basic",
"tests/test_dictutils.py::test_update",
"tests/test_dictutils.py::test_update_extend",
"tests/test_dictutils.py::test_invert",
"tests/test_dictutils.py::test_poplast",
"tests/test_dictutils.py::test_pop",
"tests/test_dictutils.py::test_pop_all",
"tests/test_dictutils.py::test_reversed",
"tests/test_dictutils.py::test_setdefault",
"tests/test_dictutils.py::test_subdict",
"tests/test_dictutils.py::test_subdict_keep_type",
"tests/test_dictutils.py::test_one_to_one",
"tests/test_dictutils.py::test_many_to_many",
"tests/test_dictutils.py::test_frozendict"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-18 13:16:53+00:00
|
bsd-2-clause
| 3,685 |
|
mahmoud__boltons-298
|
diff --git a/boltons/urlutils.py b/boltons/urlutils.py
index 6016e8a..7462ed4 100644
--- a/boltons/urlutils.py
+++ b/boltons/urlutils.py
@@ -685,7 +685,8 @@ class URL(object):
if dest.path.startswith(u'/'): # absolute path
new_path_parts = list(dest.path_parts)
else: # relative path
- new_path_parts = self.path_parts[:-1] + dest.path_parts
+ new_path_parts = list(self.path_parts[:-1]) \
+ + list(dest.path_parts)
else:
new_path_parts = list(self.path_parts)
if not query_params:
|
mahmoud/boltons
|
270e974975984f662f998c8f6eb0ebebd964de82
|
diff --git a/tests/test_urlutils.py b/tests/test_urlutils.py
index a8e1539..2351899 100644
--- a/tests/test_urlutils.py
+++ b/tests/test_urlutils.py
@@ -313,6 +313,25 @@ def test_navigate():
assert navd.to_text() == _dest_text
[email protected](
+ ('expected', 'base', 'paths'), [
+ ('https://host/b', 'https://host', ('a', '/b', )),
+ ('https://host/b', 'https://host', ('a', 'b', )),
+ ('https://host/a/b', 'https://host', ('a/', 'b', )),
+ ('https://host/b', 'https://host', ('/a', 'b', )),
+ ('https://host/a/b', 'https://host/a/', (None, 'b', )),
+ ('https://host/b', 'https://host/a', (None, 'b', )),
+])
+def test_chained_navigate(expected, base, paths):
+ """Chained :meth:`navigate` calls produces correct results."""
+ url = URL(base)
+
+ for path in paths:
+ url = url.navigate(path)
+
+ assert expected == url.to_text()
+
+
# TODO: RFC3986 6.2.3 (not just for query add, either)
# def test_add_query():
# url = URL('http://www.example.com')
|
URL.navigate fails with path in base URL
The following, and several permutations of it, fail when attempting to concatenate a `tuple` with a `list`:
```
URL('http://localhost').navigate('a').navigate('b')
```
Result:
```
...
else: # relative path
> new_path_parts = list(self.path_parts[:-1]) + dest.path_parts
E TypeError: can only concatenate list (not "tuple") to list
boltons/urlutils.py:657: TypeError
```
I am a little unclear about exactly what *should* happen when 'a' has a trailing slash or not, but enough cases fail that I know *something* is wrong.
I will follow shortly with a PR with a number of test cases, some of which I know are wrong, so I am hoping I can get some clarification about what the correct behavior *should* be.
This is not strictly an issue with chaining the `navigate` calls, but rather an ambiguity about what should happen when the base URL lacks a trailing '/', and the failures that result from that.
Env data:
platform darwin
Python 3.6.5 (built w/pyenv)
pytest-3.5.1
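For reference, a minimal sketch of the chained navigation the fix enables, using cases taken from the test patch above (both operands are now normalized to lists before concatenation):
```
from boltons.urlutils import URL

# Relative navigation from a base without a trailing slash: 'a' is
# treated as a leaf and replaced by 'b'.
assert URL('https://host').navigate('a').navigate('b').to_text() == 'https://host/b'

# A trailing slash keeps 'a' as a directory segment.
assert URL('https://host').navigate('a/').navigate('b').to_text() == 'https://host/a/b'
```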
|
0.0
|
270e974975984f662f998c8f6eb0ebebd964de82
|
[
"tests/test_urlutils.py::test_chained_navigate[https://host/b-https://host-paths1]",
"tests/test_urlutils.py::test_chained_navigate[https://host/a/b-https://host-paths2]",
"tests/test_urlutils.py::test_chained_navigate[https://host/b-https://host-paths3]",
"tests/test_urlutils.py::test_chained_navigate[https://host/a/b-https://host/a/-paths4]",
"tests/test_urlutils.py::test_chained_navigate[https://host/b-https://host/a-paths5]"
] |
[
"tests/test_urlutils.py::test_regex[http://googlewebsite.com/e-shops.aspx]",
"tests/test_urlutils.py::test_roundtrip[http://googlewebsite.com/e-shops.aspx]",
"tests/test_urlutils.py::test_query_params[http://googlewebsite.com/e-shops.aspx]",
"tests/test_urlutils.py::test_regex[http://example.com:8080/search?q=123&business=Nothing%20Special]",
"tests/test_urlutils.py::test_roundtrip[http://example.com:8080/search?q=123&business=Nothing%20Special]",
"tests/test_urlutils.py::test_query_params[http://example.com:8080/search?q=123&business=Nothing%20Special]",
"tests/test_urlutils.py::test_regex[http://hatnote.com:9000?arg=1&arg=2&arg=3]",
"tests/test_urlutils.py::test_roundtrip[http://hatnote.com:9000?arg=1&arg=2&arg=3]",
"tests/test_urlutils.py::test_query_params[http://hatnote.com:9000?arg=1&arg=2&arg=3]",
"tests/test_urlutils.py::test_regex[https://xn--bcher-kva.ch]",
"tests/test_urlutils.py::test_roundtrip[https://xn--bcher-kva.ch]",
"tests/test_urlutils.py::test_query_params[https://xn--bcher-kva.ch]",
"tests/test_urlutils.py::test_regex[http://xn--ggbla1c4e.xn--ngbc5azd/]",
"tests/test_urlutils.py::test_roundtrip[http://xn--ggbla1c4e.xn--ngbc5azd/]",
"tests/test_urlutils.py::test_query_params[http://xn--ggbla1c4e.xn--ngbc5azd/]",
"tests/test_urlutils.py::test_regex[http://tools.ietf.org/html/rfc3986#section-3.4]",
"tests/test_urlutils.py::test_roundtrip[http://tools.ietf.org/html/rfc3986#section-3.4]",
"tests/test_urlutils.py::test_query_params[http://tools.ietf.org/html/rfc3986#section-3.4]",
"tests/test_urlutils.py::test_regex[http://wiki:[email protected]]",
"tests/test_urlutils.py::test_roundtrip[http://wiki:[email protected]]",
"tests/test_urlutils.py::test_query_params[http://wiki:[email protected]]",
"tests/test_urlutils.py::test_regex[ftp://ftp.rfc-editor.org/in-notes/tar/RFCs0001-0500.tar.gz]",
"tests/test_urlutils.py::test_roundtrip[ftp://ftp.rfc-editor.org/in-notes/tar/RFCs0001-0500.tar.gz]",
"tests/test_urlutils.py::test_query_params[ftp://ftp.rfc-editor.org/in-notes/tar/RFCs0001-0500.tar.gz]",
"tests/test_urlutils.py::test_regex[http://[1080:0:0:0:8:800:200C:417A]/index.html]",
"tests/test_urlutils.py::test_roundtrip[http://[1080:0:0:0:8:800:200C:417A]/index.html]",
"tests/test_urlutils.py::test_query_params[http://[1080:0:0:0:8:800:200C:417A]/index.html]",
"tests/test_urlutils.py::test_regex[ssh://192.0.2.16:2222/]",
"tests/test_urlutils.py::test_roundtrip[ssh://192.0.2.16:2222/]",
"tests/test_urlutils.py::test_query_params[ssh://192.0.2.16:2222/]",
"tests/test_urlutils.py::test_regex[https://[::101.45.75.219]:80/?hi=bye]",
"tests/test_urlutils.py::test_roundtrip[https://[::101.45.75.219]:80/?hi=bye]",
"tests/test_urlutils.py::test_query_params[https://[::101.45.75.219]:80/?hi=bye]",
"tests/test_urlutils.py::test_regex[ldap://[::192.9.5.5]/dc=example,dc=com??sub?(sn=Jensen)]",
"tests/test_urlutils.py::test_roundtrip[ldap://[::192.9.5.5]/dc=example,dc=com??sub?(sn=Jensen)]",
"tests/test_urlutils.py::test_query_params[ldap://[::192.9.5.5]/dc=example,dc=com??sub?(sn=Jensen)]",
"tests/test_urlutils.py::test_regex[mailto:[email protected][email protected]&body=hi%20http://wikipedia.org]",
"tests/test_urlutils.py::test_roundtrip[mailto:[email protected][email protected]&body=hi%20http://wikipedia.org]",
"tests/test_urlutils.py::test_query_params[mailto:[email protected][email protected]&body=hi%20http://wikipedia.org]",
"tests/test_urlutils.py::test_regex[news:alt.rec.motorcycle]",
"tests/test_urlutils.py::test_roundtrip[news:alt.rec.motorcycle]",
"tests/test_urlutils.py::test_query_params[news:alt.rec.motorcycle]",
"tests/test_urlutils.py::test_regex[tel:+1-800-867-5309]",
"tests/test_urlutils.py::test_roundtrip[tel:+1-800-867-5309]",
"tests/test_urlutils.py::test_query_params[tel:+1-800-867-5309]",
"tests/test_urlutils.py::test_regex[urn:oasis:member:A00024:x]",
"tests/test_urlutils.py::test_roundtrip[urn:oasis:member:A00024:x]",
"tests/test_urlutils.py::test_query_params[urn:oasis:member:A00024:x]",
"tests/test_urlutils.py::test_regex[magnet:?xt=urn:btih:1a42b9e04e122b97a5254e3df77ab3c4b7da725f&dn=Puppy%20Linux%20precise-5.7.1.iso&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.publicbt.com:80&tr=udp://tracker.istole.it:6969&tr=udp://tracker.ccc.de:80&tr=udp://open.demonii.com:1337]",
"tests/test_urlutils.py::test_roundtrip[magnet:?xt=urn:btih:1a42b9e04e122b97a5254e3df77ab3c4b7da725f&dn=Puppy%20Linux%20precise-5.7.1.iso&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.publicbt.com:80&tr=udp://tracker.istole.it:6969&tr=udp://tracker.ccc.de:80&tr=udp://open.demonii.com:1337]",
"tests/test_urlutils.py::test_query_params[magnet:?xt=urn:btih:1a42b9e04e122b97a5254e3df77ab3c4b7da725f&dn=Puppy%20Linux%20precise-5.7.1.iso&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.publicbt.com:80&tr=udp://tracker.istole.it:6969&tr=udp://tracker.ccc.de:80&tr=udp://open.demonii.com:1337]",
"tests/test_urlutils.py::test_regex[http://localhost]",
"tests/test_urlutils.py::test_roundtrip[http://localhost]",
"tests/test_urlutils.py::test_query_params[http://localhost]",
"tests/test_urlutils.py::test_regex[http://localhost/]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/]",
"tests/test_urlutils.py::test_query_params[http://localhost/]",
"tests/test_urlutils.py::test_regex[http://localhost/foo]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/foo]",
"tests/test_urlutils.py::test_query_params[http://localhost/foo]",
"tests/test_urlutils.py::test_regex[http://localhost/foo/]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/foo/]",
"tests/test_urlutils.py::test_query_params[http://localhost/foo/]",
"tests/test_urlutils.py::test_regex[http://localhost/foo!!bar/]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/foo!!bar/]",
"tests/test_urlutils.py::test_query_params[http://localhost/foo!!bar/]",
"tests/test_urlutils.py::test_regex[http://localhost/foo%20bar/]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/foo%20bar/]",
"tests/test_urlutils.py::test_query_params[http://localhost/foo%20bar/]",
"tests/test_urlutils.py::test_regex[http://localhost/foo%2Fbar/]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/foo%2Fbar/]",
"tests/test_urlutils.py::test_query_params[http://localhost/foo%2Fbar/]",
"tests/test_urlutils.py::test_regex[http://localhost/foo?n]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/foo?n]",
"tests/test_urlutils.py::test_query_params[http://localhost/foo?n]",
"tests/test_urlutils.py::test_regex[http://localhost/foo?n=v]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/foo?n=v]",
"tests/test_urlutils.py::test_query_params[http://localhost/foo?n=v]",
"tests/test_urlutils.py::test_regex[http://localhost/foo?n=/a/b]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/foo?n=/a/b]",
"tests/test_urlutils.py::test_query_params[http://localhost/foo?n=/a/b]",
"tests/test_urlutils.py::test_regex[http://example.com/foo!@$bar?b!@z=123]",
"tests/test_urlutils.py::test_roundtrip[http://example.com/foo!@$bar?b!@z=123]",
"tests/test_urlutils.py::test_query_params[http://example.com/foo!@$bar?b!@z=123]",
"tests/test_urlutils.py::test_regex[http://localhost/asd?a=asd%20sdf/345]",
"tests/test_urlutils.py::test_roundtrip[http://localhost/asd?a=asd%20sdf/345]",
"tests/test_urlutils.py::test_query_params[http://localhost/asd?a=asd%20sdf/345]",
"tests/test_urlutils.py::test_regex[http://(%2525)/(%2525)?(%2525)&(%2525)=(%2525)#(%2525)]",
"tests/test_urlutils.py::test_roundtrip[http://(%2525)/(%2525)?(%2525)&(%2525)=(%2525)#(%2525)]",
"tests/test_urlutils.py::test_query_params[http://(%2525)/(%2525)?(%2525)&(%2525)=(%2525)#(%2525)]",
"tests/test_urlutils.py::test_regex[http://(%C3%A9)/(%C3%A9)?(%C3%A9)&(%C3%A9)=(%C3%A9)#(%C3%A9)]",
"tests/test_urlutils.py::test_roundtrip[http://(%C3%A9)/(%C3%A9)?(%C3%A9)&(%C3%A9)=(%C3%A9)#(%C3%A9)]",
"tests/test_urlutils.py::test_query_params[http://(%C3%A9)/(%C3%A9)?(%C3%A9)&(%C3%A9)=(%C3%A9)#(%C3%A9)]",
"tests/test_urlutils.py::test_basic",
"tests/test_urlutils.py::test_utf8_url",
"tests/test_urlutils.py::test_idna",
"tests/test_urlutils.py::test_iri_query",
"tests/test_urlutils.py::test_iri_path",
"tests/test_urlutils.py::test_url_copy",
"tests/test_urlutils.py::test_invalid_port",
"tests/test_urlutils.py::test_invalid_ipv6",
"tests/test_urlutils.py::test_parse_url",
"tests/test_urlutils.py::test_parse_equals_in_qp_value",
"tests/test_urlutils.py::test_identical_equal",
"tests/test_urlutils.py::test_equal",
"tests/test_urlutils.py::test_not_equal",
"tests/test_urlutils.py::test_userinfo",
"tests/test_urlutils.py::test_quoted_userinfo",
"tests/test_urlutils.py::test_mailto",
"tests/test_urlutils.py::test_rel_navigate",
"tests/test_urlutils.py::test_navigate",
"tests/test_urlutils.py::test_chained_navigate[https://host/b-https://host-paths0]",
"tests/test_urlutils.py::test_self_normalize",
"tests/test_urlutils.py::test_normalize_with_case",
"tests/test_urlutils.py::test_netloc_slashes",
"tests/test_urlutils.py::test_find_all_links_basic",
"tests/test_urlutils.py::test_find_all_links",
"tests/test_urlutils.py::test_unicodey",
"tests/test_urlutils.py::test_str_repr"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-01-09 10:48:16+00:00
|
bsd-2-clause
| 3,686 |
|
mahmoud__boltons-341
|
diff --git a/boltons/dictutils.py b/boltons/dictutils.py
index d74abb9..cc70d3c 100644
--- a/boltons/dictutils.py
+++ b/boltons/dictutils.py
@@ -386,6 +386,10 @@ class OrderedMultiDict(dict):
def __ne__(self, other):
return not (self == other)
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
def pop(self, k, default=_MISSING):
"""Remove all values under key *k*, returning the most-recently
inserted value. Raises :exc:`KeyError` if the key is not
|
mahmoud/boltons
|
c2b04b4cb16410d7bf9ff781c78dee3ca908dc32
|
diff --git a/tests/test_dictutils.py b/tests/test_dictutils.py
index 076798b..20c431f 100644
--- a/tests/test_dictutils.py
+++ b/tests/test_dictutils.py
@@ -301,6 +301,19 @@ def test_setdefault():
assert e_omd.popall(1, None) is None
assert len(e_omd) == 0
+
+def test_ior():
+ omd_a = OMD(_ITEMSETS[1])
+ omd_b = OMD(_ITEMSETS[2])
+ omd_c = OMD(_ITEMSETS[1])
+
+ omd_a_id = id(omd_a)
+ omd_a |= omd_b
+ omd_c.update(omd_b)
+
+ assert omd_a_id == id(omd_a)
+ assert omd_a == omd_c
+
## END OMD TESTS
import string
|
Support in-place union for `dictutils.OrderedMultiDict`
Not sure if this is a feature request or a bug, since `OMD` inherits from `dict`.
Python `3.11.3`
Version `23.0.0`
```python
# union with a `dict`
In [2]: a = boltons.dictutils.OrderedMultiDict({'a': 1})
In [3]: a | {'c': 4}
Out[3]: {'a': 1, 'c': 4}
In [4]: a |= {'d': 5}
In [5]: a
Out[5]: OrderedMultiDict([('a', 1)])
# union with another `OMD`
In [10]: a = boltons.dictutils.OrderedMultiDict({'a': 1})
In [11]: a | boltons.dictutils.OrderedMultiDict({'c': 4})
Out[11]: {'a': 1, 'c': 4}
In [12]: a |= boltons.dictutils.OrderedMultiDict({'d': 5})
In [13]: a
Out[13]: OrderedMultiDict([('a', 1)])
```
`dict`'s behaviour
```python
In [6]: a = {'a': 1}
In [7]: a | {'c': 4}
Out[7]: {'a': 1, 'c': 4}
In [8]: a |= {'d': 5}
In [9]: a
Out[9]: {'a': 1, 'd': 5}
```
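For reference, a minimal sketch of the fixed behaviour; the patch routes `|=` through `OMD.update()` and returns `self`, so the in-place union mutates the OMD just like it does a plain `dict`:
```
from boltons.dictutils import OrderedMultiDict as OMD

omd = OMD({'a': 1})
original_id = id(omd)

omd |= {'d': 5}  # now delegates to OMD.update()

assert omd['d'] == 5           # the union is no longer silently dropped
assert id(omd) == original_id  # and the same object is mutated in place
```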
|
0.0
|
c2b04b4cb16410d7bf9ff781c78dee3ca908dc32
|
[
"tests/test_dictutils.py::test_ior"
] |
[
"tests/test_dictutils.py::test_dict_init",
"tests/test_dictutils.py::test_todict",
"tests/test_dictutils.py::test_eq",
"tests/test_dictutils.py::test_copy",
"tests/test_dictutils.py::test_omd_pickle",
"tests/test_dictutils.py::test_clear",
"tests/test_dictutils.py::test_types",
"tests/test_dictutils.py::test_multi_correctness",
"tests/test_dictutils.py::test_kv_consistency",
"tests/test_dictutils.py::test_update_basic",
"tests/test_dictutils.py::test_update",
"tests/test_dictutils.py::test_update_extend",
"tests/test_dictutils.py::test_invert",
"tests/test_dictutils.py::test_poplast",
"tests/test_dictutils.py::test_pop",
"tests/test_dictutils.py::test_addlist",
"tests/test_dictutils.py::test_pop_all",
"tests/test_dictutils.py::test_reversed",
"tests/test_dictutils.py::test_setdefault",
"tests/test_dictutils.py::test_subdict",
"tests/test_dictutils.py::test_subdict_keep_type",
"tests/test_dictutils.py::test_one_to_one",
"tests/test_dictutils.py::test_many_to_many",
"tests/test_dictutils.py::test_frozendict",
"tests/test_dictutils.py::test_frozendict_ior",
"tests/test_dictutils.py::test_frozendict_api"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-05-02 15:55:54+00:00
|
bsd-2-clause
| 3,687 |
|
mahmoud__glom-49
|
diff --git a/glom/core.py b/glom/core.py
index 69ae4dc..9cbd27b 100644
--- a/glom/core.py
+++ b/glom/core.py
@@ -721,6 +721,12 @@ class _TType(object):
def __repr__(self):
return _format_t(_T_PATHS[self][1:])
+ def __getstate__(self):
+ return tuple(_T_PATHS[self])
+
+ def __setstate__(self, state):
+ _T_PATHS[self] = state
+
def _format_t(path):
def kwarg_fmt(kw):
|
mahmoud/glom
|
917343ee7d3576a7497ec348f6fb2e94d239acb0
|
diff --git a/glom/test/test_path_and_t.py b/glom/test/test_path_and_t.py
index 394525f..614899f 100644
--- a/glom/test/test_path_and_t.py
+++ b/glom/test/test_path_and_t.py
@@ -69,3 +69,19 @@ def test_path_access_error_message():
assert ("PathAccessError: could not access 'b', part 1 of Path('a', T.b), got error: AttributeError"
in exc_info.exconly())
assert repr(exc_info.value) == """PathAccessError(AttributeError("\'dict\' object has no attribute \'b\'",), Path(\'a\', T.b), 1)"""
+
+
+def test_t_picklability():
+ import pickle
+
+ class TargetType(object):
+ def __init__(self):
+ self.attribute = lambda: None
+ self.attribute.method = lambda: {'key': lambda x: x * 2}
+
+ spec = T.attribute.method()['key'](x=5)
+
+ rt_spec = pickle.loads(pickle.dumps(spec))
+ assert repr(spec) == repr(rt_spec)
+
+ assert glom(TargetType(), spec) == 10
|
KeyError in _t_child from weakref
I'm running into a weird issue using glom in PySpark on Databricks.
This expression:
`glom(ping, (T[stub]["values"].values(), sum), default=0)`
(where `stub` is `"a11y_time"`)
is consistently throwing this exception when I run it on my real data:
```
/databricks/spark/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    318                 raise Py4JJavaError(
    319                     "An error occurred while calling {0}{1}{2}.\n".
--> 320                     format(target_id, ".", name), value)
    321             else:
    322                 raise Py4JError(

Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.runJob.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 22.0 failed 4 times, most recent failure: Lost task 0.3 in stage 22.0 (TID 243, 10.166.248.213, executor 2): org.apache.spark.api.python.PythonException:
Traceback (most recent call last):
  File "/databricks/spark/python/pyspark/worker.py", line 229, in main
    process()
  File "/databricks/spark/python/pyspark/worker.py", line 224, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/databricks/spark/python/pyspark/serializers.py", line 372, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "/databricks/spark/python/pyspark/rdd.py", line 1354, in takeUpToNumLeft
    yield next(iterator)
  File "<command-26292>", line 10, in to_row
  File "<command-26292>", line 5, in histogram_measures
  File "/databricks/python/local/lib/python2.7/site-packages/glom/core.py", line 753, in __getitem__
    return _t_child(self, '[', item)
  File "/databricks/python/local/lib/python2.7/site-packages/glom/core.py", line 791, in _t_child
    _T_PATHS[t] = _T_PATHS[parent] + (operation, arg)
  File "/usr/lib/python2.7/weakref.py", line 330, in __getitem__
    return self.data[ref(key)]
KeyError: <weakref at 0x7f84c7d2f6d8; to '_TType' at 0x7f84c8933f30>
```
The object that's crashing it is, itself, totally unremarkable:
`{'submission_date': u'20180718', 'a11y_count': None, 'a11y_node_inspected_count': None, 'a11y_service_time': None, 'toolbox_time': None, 'toolbox_count': None, 'a11y_time': None, 'branch': u'Treatment', 'client_id': u'some-random-uuid', 'a11y_picker_time': None, 'a11y_select_accessible_for_node': None} `
The Python that Databricks is running looks like `2.7.12 (default, Dec 4 2017, 14:50:18) [GCC 5.4.0 20160609]`.
I can't reproduce it on my Mac in 2.7.14 or 2.7.12.
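For reference, a minimal sketch of what the `__getstate__`/`__setstate__` pair above addresses: `T` paths live in a `WeakKeyDictionary` keyed on the `_TType` instance, so a spec that got pickled (as PySpark does when shipping closures to executors) came back with no entry in `_T_PATHS` and raised the `KeyError` seen here. With explicit pickle support the spec round-trips (adapted from the test patch):
```
import pickle
from glom import T

# A spec shaped like the failing one: T[stub]["values"].values()
spec = T['a11y_time']['values'].values()

# The path stored in _T_PATHS now survives serialization.
rt_spec = pickle.loads(pickle.dumps(spec))
assert repr(spec) == repr(rt_spec)
```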
|
0.0
|
917343ee7d3576a7497ec348f6fb2e94d239acb0
|
[
"glom/test/test_path_and_t.py::test_t_picklability"
] |
[
"glom/test/test_path_and_t.py::test_list_path_access",
"glom/test/test_path_and_t.py::test_path",
"glom/test/test_path_and_t.py::test_empty_path_access",
"glom/test/test_path_and_t.py::test_path_t_roundtrip"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2018-08-08 03:00:19+00:00
|
bsd-3-clause
| 3,688 |
|
mahmoud__glom-69
|
diff --git a/glom/mutable.py b/glom/mutable.py
index d84eda6..ee605b7 100644
--- a/glom/mutable.py
+++ b/glom/mutable.py
@@ -4,7 +4,7 @@ This module contains Specs that perform mutations.
import operator
from pprint import pprint
-from .core import Path, T, S, Spec, glom, UnregisteredTarget, GlomError
+from .core import Path, T, S, Spec, glom, UnregisteredTarget, GlomError, PathAccessError
from .core import TType, register_op, TargetRegistry
try:
@@ -88,9 +88,21 @@ class Assign(object):
:func:`~glom.register()` using the ``"assign"`` operation name.
Attempting to assign to an immutable structure, like a
- :class:`tuple`, will result in a :class:`~glom.PathAssignError`.
+ :class:`tuple`, will result in a
+ :class:`~glom.PathAssignError`. Attempting to assign to a path
+ that doesn't exist will raise a :class:`~PathAccessError`.
+
+ To automatically backfill missing structures, you can pass a
+ callable to the *missing* argument. This callable will be called
+ for each path segment along the assignment which is not
+ present.
+
+ >>> target = {}
+ >>> assign(target, 'a.b.c', 'hi', missing=dict)
+ {'a': {'b': {'c': 'hi'}}}
+
"""
- def __init__(self, path, val):
+ def __init__(self, path, val, missing=None):
# TODO: an option like require_preexisting or something to
# ensure that a value is mutated, not just added. Current
# workaround is to do a Check().
@@ -105,6 +117,7 @@ class Assign(object):
self.op, self.arg = path.items()[-1]
except IndexError:
raise ValueError('path must have at least one element')
+ self._orig_path = path
self.path = path[:-1]
if self.op not in '[.P':
@@ -112,32 +125,51 @@ class Assign(object):
raise ValueError('last part of path must be setattr or setitem')
self.val = val
+ if missing is not None:
+ if not callable(missing):
+ raise TypeError('expected missing to be callable, not %r' % (missing,))
+ self.missing = missing
+
def glomit(self, target, scope):
if type(self.val) is Spec:
val = scope[glom](target, self.val, scope)
else:
val = self.val
- dest = scope[glom](target, self.path, scope)
+
+ op, arg, path = self.op, self.arg, self.path
+ try:
+ dest = scope[glom](target, self.path, scope)
+ except PathAccessError as pae:
+ if not self.missing:
+ raise
+
+ remaining_path = self._orig_path[pae.part_idx + 1:]
+ val = scope[glom](self.missing(), Assign(remaining_path, val, missing=self.missing), scope)
+
+ op, arg = self._orig_path.items()[pae.part_idx]
+ path = self._orig_path[:pae.part_idx]
+ dest = scope[glom](target, path, scope)
+
# TODO: forward-detect immutable dest?
- if self.op == '[':
- dest[self.arg] = val
- elif self.op == '.':
- setattr(dest, self.arg, val)
- elif self.op == 'P':
- assign = scope[TargetRegistry].get_handler('assign', dest)
- if not assign:
+ if op == '[':
+ dest[arg] = val
+ elif op == '.':
+ setattr(dest, arg, val)
+ elif op == 'P':
+ _assign = scope[TargetRegistry].get_handler('assign', dest)
+ if not _assign:
raise UnregisteredTarget('assign', type(dest),
scope[TargetRegistry].get_type_map('assign'),
path=scope[Path])
try:
- assign(dest, self.arg, val)
+ _assign(dest, arg, val)
except Exception as e:
- raise PathAssignError(e, self.path, self.arg)
+ raise PathAssignError(e, path, arg)
return target
-def assign(obj, path, val):
+def assign(obj, path, val, missing=None):
"""The ``assign()`` function provides convenient "deep set"
functionality, modifying nested data structures in-place::
@@ -146,10 +178,11 @@ def assign(obj, path, val):
>>> pprint(target)
{'a': [{'b': 'c'}, {'d': 'e'}]}
- For more information and examples, see the :class:`~glom.Assign`
- specifier type, which this function wraps.
+ Missing structures can also be automatically created with the
+ *missing* parameter. For more information and examples, see the
+ :class:`~glom.Assign` specifier type, which this function wraps.
"""
- return glom(obj, Assign(path, val))
+ return glom(obj, Assign(path, val, missing=missing))
_ALL_BUILTIN_TYPES = [v for v in __builtins__.values() if isinstance(v, type)]
|
mahmoud/glom
|
ab822e39ca897f687bd84b620896bd9097de119f
|
diff --git a/glom/test/test_mutable.py b/glom/test/test_mutable.py
index 843a3fc..97f12ad 100644
--- a/glom/test/test_mutable.py
+++ b/glom/test/test_mutable.py
@@ -1,6 +1,6 @@
import pytest
-from glom import glom, Path, T, Spec, Glommer, PathAssignError
+from glom import glom, Path, T, Spec, Glommer, PathAssignError, PathAccessError
from glom.core import UnregisteredTarget
from glom.mutable import Assign, assign
@@ -60,6 +60,9 @@ def test_bad_assign_target():
with pytest.raises(PathAssignError, match='could not assign'):
glom(BadTarget(), spec)
+
+ with pytest.raises(PathAccessError, match='could not access'):
+ assign({}, 'a.b.c', 'moot')
return
@@ -88,3 +91,96 @@ def test_invalid_assign_op_target():
with pytest.raises(ValueError):
assign(target, spec, None)
+ return
+
+
+def test_assign_missing_signature():
+ # test signature (non-callable missing hook)
+ with pytest.raises(TypeError, match='callable'):
+ assign({}, 'a.b.c', 'lol', missing='invalidbcnotcallable')
+ return
+
+
+def test_assign_missing_dict():
+ target = {}
+ val = object()
+
+ from itertools import count
+ counter = count()
+ def debugdict():
+ ret = dict()
+ #ret['id'] = id(ret)
+ #ret['inc'] = counter.next()
+ return ret
+
+ assign(target, 'a.b.c.d', val, missing=debugdict)
+
+ assert target == {'a': {'b': {'c': {'d': val}}}}
+
+
+def test_assign_missing_object():
+ val = object()
+ class Container(object):
+ pass
+
+ target = Container()
+ target.a = extant_a = Container()
+ assign(target, 'a.b.c.d', val, missing=Container)
+
+ assert target.a.b.c.d is val
+ assert target.a is extant_a # make sure we didn't overwrite anything on the path
+
+
+def test_assign_missing_with_extant_keys():
+ """This test ensures that assign with missing doesn't overwrite
+ perfectly fine extant keys that are along the path it needs to
+ assign to. call count is also checked to make sure missing() isn't
+ invoked too many times.
+
+ """
+ target = {}
+ value = object()
+ default_struct = {'b': {'c': {}}}
+
+ call_count = [0]
+
+ def _get_default_struct():
+ call_count[0] += 1 # make sure this is only called once
+ return default_struct
+
+ assign(target, 'a.b.c', value, missing=_get_default_struct)
+
+ assert target['a']['b']['c'] is value
+ assert target['a']['b'] is default_struct['b']
+ assert call_count == [1]
+
+
+def test_assign_missing_unassignable():
+ """Check that the final assignment to the target object comes last,
+ ensuring that failed assignments don't leave targets in a bad
+ state.
+
+ """
+
+ class Tarjay(object):
+ init_count = 0
+ def __init__(self):
+ self.__class__.init_count += 1
+
+ @property
+ def unassignable(self):
+ return
+
+ value = object()
+ target = {"preexisting": "ok"}
+
+ with pytest.raises(PathAssignError):
+ assign(target, 'tarjay.unassignable.a.b.c', value, missing=Tarjay)
+
+ assert target == {'preexisting': 'ok'}
+
+ # why 3? "c" gets the value of "value", while "b", "a", and
+ # "tarjay" all succeed and are set to Tarjay instances. Then
+ # unassignable is already present, but not possible to assign to,
+ # raising the PathAssignError.
+ assert Tarjay.init_count == 3
|
deeply nested setting
I work on a project that flings around deeply nested Python structures with wild abandon. glom nicely handles the "get something from this structure even if all the branches of the path aren't there" case, and now I can replace some code I wrote. Yay!
The other side of things that I need to handle is setting a value in a deeply nested structure where the branches of the path may not be there.
For example, maybe something like this which uses dicts:
```
>>> from glom import glom_set
>>> foo = {}
>>> glom_set(foo, 'a.b.c', value=5)
>>> foo
{'a': {'b': {'c': 5}}}
```
There are more complex tree manipulations that could be done, but at the moment I'm thinking about setting a single leaf value.
Is manipulating deeply nested data structures in place in scope for glom?
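For reference, a minimal sketch of the `missing=` backfill hook that the diff above adds to `assign()`; the dict example comes straight from the new docstring, and the attribute example mirrors `test_assign_missing_object`:
```
from glom.mutable import assign

target = {}
assign(target, 'a.b.c', 5, missing=dict)  # each absent segment is dict()-ed
assert target == {'a': {'b': {'c': 5}}}

class Container(object):
    pass

obj = Container()
assign(obj, 'x.y', 'leaf', missing=Container)  # works for attributes too
assert obj.x.y == 'leaf'
```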
|
0.0
|
ab822e39ca897f687bd84b620896bd9097de119f
|
[
"glom/test/test_mutable.py::test_assign_missing_signature",
"glom/test/test_mutable.py::test_assign_missing_dict",
"glom/test/test_mutable.py::test_assign_missing_object",
"glom/test/test_mutable.py::test_assign_missing_with_extant_keys",
"glom/test/test_mutable.py::test_assign_missing_unassignable"
] |
[
"glom/test/test_mutable.py::test_assign",
"glom/test/test_mutable.py::test_assign_spec_val",
"glom/test/test_mutable.py::test_unregistered_assign",
"glom/test/test_mutable.py::test_bad_assign_target",
"glom/test/test_mutable.py::test_sequence_assign",
"glom/test/test_mutable.py::test_invalid_assign_op_target"
] |
{
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-12-12 09:47:15+00:00
|
bsd-3-clause
| 3,689 |
|
maikelboogerd__eventcore-kafka-7
|
diff --git a/eventcore_kafka/__init__.py b/eventcore_kafka/__init__.py
index b16a1d8..6edf4ad 100644
--- a/eventcore_kafka/__init__.py
+++ b/eventcore_kafka/__init__.py
@@ -1,2 +1,2 @@
-from .producer import KafkaProducer # noqa
-from .consumer import KafkaConsumer # noqa
+from .producer import KafkaProducer # noqa
+from .consumer import KafkaConsumer, BlockingKafkaConsumer # noqa
diff --git a/eventcore_kafka/consumer.py b/eventcore_kafka/consumer.py
index 3d380b4..650bad1 100644
--- a/eventcore_kafka/consumer.py
+++ b/eventcore_kafka/consumer.py
@@ -2,8 +2,9 @@ import json
import logging
import confluent_kafka as kafka
-
+from confluent_kafka.cimpl import Message
from eventcore import Consumer
+from eventcore.exceptions import FatalConsumerError
log = logging.getLogger(__name__)
@@ -16,14 +17,13 @@ class KafkaConsumer(Consumer):
:param topics: list of topics to consume from.
"""
- def __init__(self, servers, group_id, topics):
+ def __init__(self, servers, group_id, topics, **kwargs):
# Parse the servers to ensure it's a comma-separated string.
if isinstance(servers, list):
servers = ','.join(servers)
- self.kafka_consumer = kafka.Consumer({
- 'bootstrap.servers': servers,
- 'group.id': group_id
- })
+ settings = {'bootstrap.servers': servers, 'group.id': group_id}
+ settings.update(kwargs)
+ self.kafka_consumer = kafka.Consumer(settings)
# Parse the topics to ensure it's a list.
if isinstance(topics, str):
topics = topics.split(',')
@@ -32,28 +32,76 @@ class KafkaConsumer(Consumer):
def consume(self):
while True:
- message = self.kafka_consumer.poll(1.0)
- if not message:
- continue
- if message.error():
- # PARTITION_EOF error can be ignored.
- if message.error().code() == kafka.KafkaError._PARTITION_EOF:
- continue
- else:
- raise kafka.KafkaException(message.error())
-
- try:
- message_body = json.loads(message.value())
- except TypeError:
- message_body = json.loads(message.value().decode('utf-8'))
- except:
- log.error('@KafkaConsumer.consume Exception:',
- exc_info=True)
- try:
- subject = message.key().decode('utf-8')
- except AttributeError:
- subject = message.key()
-
- self.process_event(name=message_body.get('event'),
- subject=subject,
- data=message_body.get('data'))
+ self.poll_and_process()
+
+ def poll_and_process(self):
+ message = self.kafka_consumer.poll()
+ if not self.is_valid_message(message):
+ return
+ subject, message_body = self.parse_message(message)
+ self.process_event(
+ name=message_body.get('event'),
+ subject=subject,
+ data=message_body.get('data'))
+
+ @staticmethod
+ def is_valid_message(message: Message):
+ if not message:
+ return False
+ if message.error():
+ # PARTITION_EOF error can be ignored.
+ if message.error().code() == kafka.KafkaError._PARTITION_EOF:
+ return False
+ else:
+ raise kafka.KafkaException(message.error())
+ return True
+
+ @staticmethod
+ def parse_message(message: Message) -> (str, dict):
+ subject, message_body = None, None
+ try:
+ message_body = json.loads(message.value())
+ except TypeError:
+ message_body = json.loads(message.value().decode('utf-8'))
+ except:
+ log.error('@KafkaConsumer.consume Exception:', exc_info=True)
+ try:
+ subject = message.key().decode('utf-8')
+ except AttributeError:
+ subject = message.key()
+
+ if message_body is None or not isinstance(message_body, dict):
+ raise ValueError("Message body is malformed: {}".format(
+ repr(message_body)))
+
+ return subject, message_body
+
+
+class BlockingKafkaConsumer(KafkaConsumer):
+ """Consumer for Kafka topics, blocks when a message cannot be processed."""
+
+ def __init__(self, servers, group_id, topics, **kwargs):
+ kwargs['enable.auto.commit'] = False
+ kwargs['auto.offset.reset'] = "smallest" # Start from first failed.
+ super().__init__(servers, group_id, topics, **kwargs)
+
+ def poll_and_process(self):
+ message = self.kafka_consumer.poll()
+ if not self.is_valid_message(message):
+ self.kafka_consumer.commit(message)
+ return
+ try:
+ subject, message_body = self.parse_message(message)
+ except (ValueError, AttributeError, TypeError):
+ self.kafka_consumer.commit(message)
+ return
+ try:
+ self.process_event(
+ name=message_body.get('event'),
+ subject=subject,
+ data=message_body.get('data'))
+ self.kafka_consumer.commit(message)
+ except BaseException:
+ raise FatalConsumerError(
+ "Message with body {} could not be processed and blocks "
+ "the consumer. Manual action required.".format(message_body))
diff --git a/eventcore_kafka/header.py b/eventcore_kafka/header.py
new file mode 100644
index 0000000..e8aa19d
--- /dev/null
+++ b/eventcore_kafka/header.py
@@ -0,0 +1,26 @@
+import datetime
+import uuid
+
+
+class Header:
+ """
+ Provides the meta information needed to increase traceability.
+ """
+
+ id = ''
+ source = ''
+ timestamp = ''
+ type = ''
+
+ def __init__(self, source=None, event=None):
+ """
+ Id and timestamp are added automatically. The id is a UUID which
+ identifies the current message, so it can be traced across services.
+ :param source: The source which the message is originated from.
+ :param event: The name of the event which is produced.
+ """
+ self.id = str(uuid.uuid4())
+ self.timestamp = datetime.datetime.now().isoformat()
+ self.source = source
+ self.type = event
+
diff --git a/eventcore_kafka/producer.py b/eventcore_kafka/producer.py
index 9214b66..efda89c 100644
--- a/eventcore_kafka/producer.py
+++ b/eventcore_kafka/producer.py
@@ -1,5 +1,6 @@
import json
import confluent_kafka as kafka
+from eventcore_kafka.header import Header
from eventcore import Producer
@@ -10,7 +11,18 @@ class KafkaProducer(Producer):
:param servers: list of brokers to consume from.
"""
- def __init__(self, servers):
+ source = ''
+
+ def __init__(self, servers, source=None):
+ """
+ Initialize the producer for Kafka
+ :param servers: The host and port of where Kafka runs.
+ :param source: The source of the application which is producing the
+ messages.
+ """
+
+ self.source = source
+
# Parse the servers to ensure it's a comma-separated string.
if isinstance(servers, list):
servers = ','.join(servers)
@@ -18,6 +30,15 @@ class KafkaProducer(Producer):
'bootstrap.servers': servers
})
+ def get_headers(self, event):
+ """
+ Creates a header which is added to the Kafka message
+ :param event: the name of the event, e.g. user.created
+ :return: a json serialized representation of the header
+ """
+ header = Header(source=self.source, event=event)
+ return header.__dict__
+
def produce(self, topic, event, subject, data):
message_body = json.dumps({
'event': event,
@@ -25,4 +46,5 @@ class KafkaProducer(Producer):
})
self.kafka_producer.produce(topic=topic,
key=subject,
- value=message_body)
+ value=message_body,
+ headers=self.get_headers(event))
diff --git a/setup.py b/setup.py
index 2344fa1..6a16fa1 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@ import setuptools
setuptools.setup(
name='eventcore-kafka',
- version='0.3.2',
+ version='0.3.3rc2',
description='Produce and consume events with Kafka.',
author='Maikel van den Boogerd',
author_email='[email protected]',
|
maikelboogerd/eventcore-kafka
|
b3f36ddf18f44c5ecc933a17792021d645581978
|
diff --git a/tests.py b/tests.py
new file mode 100644
index 0000000..155813d
--- /dev/null
+++ b/tests.py
@@ -0,0 +1,102 @@
+import unittest
+from collections import namedtuple
+from unittest.mock import Mock, patch
+
+from confluent_kafka.cimpl import KafkaException
+
+from eventcore_kafka import KafkaConsumer, BlockingKafkaConsumer
+
+from eventcore.exceptions import FatalConsumerError
+
+
+def consumer_settings():
+ return {"servers": "localhost", "group_id": 1, "topics": ["hot"]}
+
+
+Message = namedtuple("Message", (
+ "key",
+ "value",
+ "error",
+))
+
+
+def mock_message() -> Message:
+ return Message(
+ key=Mock(return_value=b"MyKey"),
+ value=Mock(return_value=b'{"data": []}'),
+ error=Mock(return_value=False))
+
+
+def mock_handler(name, subject, data):
+ raise NotImplementedError()
+
+
+class KafkaConsumerTest(unittest.TestCase):
+ def test_is_valid_message(self):
+ message = mock_message()
+ valid = KafkaConsumer.is_valid_message(message)
+
+ assert valid
+
+ def test_is_invalid_message_error(self):
+ error = Mock()
+ message = Message(
+ key=Mock(return_value=b"MyKey"),
+ value=Mock(return_value=b'{"data": []}'),
+ error=Mock(return_value=error))
+ self.assertRaises(
+ KafkaException, KafkaConsumer.is_valid_message, message=message)
+
+ def test_is_invalid_message(self):
+ valid = KafkaConsumer.is_valid_message(b"")
+ assert valid is False
+
+ def test_parse_message(self):
+ message = mock_message()
+ subject, message_body = KafkaConsumer.parse_message(message)
+ assert subject == "MyKey"
+ assert len(message_body['data']) == 0
+
+
+class ConsumerMock(Mock):
+ pass
+
+
+class BlockingKafkaConsumerTest(unittest.TestCase):
+ @patch("eventcore_kafka.consumer.kafka")
+ def test_commit_message(self, kafka):
+ message = mock_message()
+
+ self._patch_consumer(kafka, message)
+
+ settings = consumer_settings()
+ consumer = BlockingKafkaConsumer(**settings)
+ consumer.process_event = Mock()
+
+ consumer.poll_and_process()
+
+ assert kafka.Consumer.commit.called is True
+
+ @patch("eventcore_kafka.consumer.kafka")
+ def test_blocking_message(self, kafka):
+ message = mock_message()
+
+ self._patch_consumer(kafka, message)
+
+ settings = consumer_settings()
+ consumer = BlockingKafkaConsumer(**settings)
+ consumer.process_event = mock_handler
+
+ with self.assertRaises(FatalConsumerError):
+ consumer.poll_and_process()
+
+ assert kafka.Consumer.commit.called is False
+
+ @staticmethod
+ def _patch_consumer(kafka, message):
+ ConsumerMock.poll = Mock(return_value=message)
+ ConsumerMock.subscribe = Mock()
+ ConsumerMock.commit = Mock()
+ kafka.KafkaError._PARTITION_EOF = None
+ kafka.KafkaException = NotImplementedError
+ kafka.Consumer = ConsumerMock
|
Support for blocking events
Whenever a consumer fails to process a message successfully, it is sometimes desired behaviour to make that a blocking event.
For instance, if your events need to be consumed in order, you'd want the event to be blocking:
```
MESSAGE 1: Order 500 items of product Eggs
MESSAGE 2: Change order to 5 items of product Eggs
```
If `MESSAGE 2` fails, the end user is not going to be too happy about it. In this case you want the last message to be processed last (or the first one not at all, but you can't always anticipate that).
A probable cause of a `MESSAGE 2` failure might be a NetworkError (DB, HTTP) or a failure in logic (schema change).
You might be able to handle NetworkErrors by retrying `MESSAGE 2`. But logic flaws need to be fixed manually, and this might pose a problem when you only have `Z` seconds to resolve the issue.
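A minimal sketch of how such a blocking consumer could be driven; the `BlockingKafkaConsumer` name, its constructor arguments, and `FatalConsumerError` are assumptions taken from the accompanying test patch:
```
# Sketch only: BlockingKafkaConsumer and FatalConsumerError are assumptions
# taken from tests.py; the argument names come from consumer_settings() there.
from eventcore.exceptions import FatalConsumerError
from eventcore_kafka import BlockingKafkaConsumer

consumer = BlockingKafkaConsumer(servers="localhost", group_id=1, topics=["hot"])

try:
    # The offset is only committed once processing succeeds, so a failing
    # MESSAGE 2 blocks the partition instead of being silently skipped.
    consumer.poll_and_process()
except FatalConsumerError:
    # The consumer refuses to advance past a message it cannot process;
    # fix the handler (or the message) and retry.
    raise
```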
|
0.0
|
b3f36ddf18f44c5ecc933a17792021d645581978
|
[
"tests.py::KafkaConsumerTest::test_is_invalid_message",
"tests.py::KafkaConsumerTest::test_is_invalid_message_error",
"tests.py::KafkaConsumerTest::test_is_valid_message",
"tests.py::KafkaConsumerTest::test_parse_message",
"tests.py::BlockingKafkaConsumerTest::test_blocking_message",
"tests.py::BlockingKafkaConsumerTest::test_commit_message"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-11-06 12:43:15+00:00
|
mit
| 3,690 |
|
makcedward__nlpaug-169
|
diff --git a/nlpaug/augmenter/char/char_augmenter.py b/nlpaug/augmenter/char/char_augmenter.py
index be44785..c431ac5 100755
--- a/nlpaug/augmenter/char/char_augmenter.py
+++ b/nlpaug/augmenter/char/char_augmenter.py
@@ -95,7 +95,7 @@ class CharAugmenter(Augmenter):
exception = WarningException(name=WarningName.OUT_OF_VOCABULARY,
code=WarningCode.WARNING_CODE_002, msg=WarningMessage.NO_WORD)
exception.output()
- return None
+ return []
if len(idxes) < aug_cnt:
aug_cnt = len(idxes)
aug_idxes = self.sample(idxes, aug_cnt)
|
makcedward/nlpaug
|
944d3a269126da7362781b1421688a0d88d3b9d2
|
diff --git a/test/augmenter/char/test_char.py b/test/augmenter/char/test_char.py
index d964bbc..a6ba349 100755
--- a/test/augmenter/char/test_char.py
+++ b/test/augmenter/char/test_char.py
@@ -38,6 +38,13 @@ class TestCharacter(unittest.TestCase):
tokens = aug.tokenizer(text)
self.assertEqual(tokens, expected_tokens)
+ def test_no_aug(self):
+ aug = nac.KeyboardAug(aug_word_min=0.0, aug_word_p=0.05)
+ text = '| 4 || || ½ || 0 || ½ || - || 1 || 1 || 1 || 0 || 0 || 0 || 1 || 1 || 1 || 1 || 1 || 1 || 10 || 67.75'
+
+ augmented_data = aug.augment(text)
+ self.assertEqual(text.replace(' ', ''), augmented_data.replace(' ', ''))
+
def test_multi_thread(self):
text = 'The quick brown fox jumps over the lazy dog.'
n = 3
|
Bugfix for issue #203539
I think there is a bug in the char augmenter:
https://github.com/makcedward/nlpaug/blob/5238e0be734841b69651d2043df535d78a8cc594/nlpaug/augmenter/char/char_augmenter.py#L98
the behavior is not consistent with that of the word augmenter:
https://github.com/makcedward/nlpaug/blob/5238e0be734841b69651d2043df535d78a8cc594/nlpaug/augmenter/word/word_augmenter.py#L80
this is related to issue #203539
After changing `return None` to `return []`, the code below will not raise an exception.
```
import nlpaug.augmenter.char as nac
aug = nac.KeyboardAug(aug_word_min=0.0, aug_word_p=0.05)
aug.augment("| 4 || || ½ || 0 || ½ || - || 1 || 1 || 1 || 0 || 0 || 0 || 1 || 1 || 1 || 1 || 1 || 1 || 10 || 67.75")
```
|
0.0
|
944d3a269126da7362781b1421688a0d88d3b9d2
|
[
"test/augmenter/char/test_char.py::TestCharacter::test_no_aug"
] |
[
"test/augmenter/char/test_char.py::TestCharacter::test_empty",
"test/augmenter/char/test_char.py::TestCharacter::test_min_char",
"test/augmenter/char/test_char.py::TestCharacter::test_multi_inputs",
"test/augmenter/char/test_char.py::TestCharacter::test_multi_thread",
"test/augmenter/char/test_char.py::TestCharacter::test_special_char",
"test/augmenter/char/test_char.py::TestCharacter::test_stopwords",
"test/augmenter/char/test_char.py::TestCharacter::test_stopwords_regex",
"test/augmenter/char/test_char.py::TestCharacter::test_tokenizer"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-10-24 07:07:25+00:00
|
mit
| 3,691 |
|
malte-soe__dataclass-cli-6
|
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 35e558f..3cc4d10 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -1,5 +1,8 @@
name: Codecoverage
-on: [push]
+on:
+ pull_request:
+ push:
+ branches: [master]
jobs:
run:
runs-on: ubuntu-latest
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index 73b2095..c147950 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -1,10 +1,8 @@
name: pre-commit
-
on:
pull_request:
push:
branches: [master]
-
jobs:
pre-commit:
runs-on: ubuntu-latest
diff --git a/dataclass_cli/parser.py b/dataclass_cli/parser.py
index 4caf33b..38be763 100644
--- a/dataclass_cli/parser.py
+++ b/dataclass_cli/parser.py
@@ -1,6 +1,7 @@
import argparse
import dataclasses
import enum
+from functools import partial
from typing import Dict, List, Union
@@ -9,16 +10,23 @@ class Options(str, enum.Enum):
HELP_TEXT = enum.auto()
-def add(
+def add(cls=None, *, name=None, **kwargs):
+ if cls is None:
+ return partial(_add, name=name, **kwargs)
+ return _add(cls, name=name, **kwargs)
+
+
+def _add(
cls,
*,
+ name: str = "",
_classes: Dict[str, List[str]] = {},
_parsed_args: Dict[str, Union[int, str]] = {},
_parser=argparse.ArgumentParser(),
):
assert dataclasses.is_dataclass(cls)
- name = cls.__name__.lower()
+ name = name or cls.__name__.lower()
assert name not in _classes
_classes[name] = [arg.name for arg in dataclasses.fields(cls)]
|
malte-soe/dataclass-cli
|
77350a5c3ae2390a272e061a707d347e723cc977
|
diff --git a/tests/test_dc_cli.py b/tests/test_dc_cli.py
index 02e0e8a..f4f7319 100644
--- a/tests/test_dc_cli.py
+++ b/tests/test_dc_cli.py
@@ -133,3 +133,17 @@ class TestDcCli(unittest.TestCase):
_ = DataclassWithNoDefault()
self.assertIn("the following arguments are required", fake_out.getvalue())
+
+ def test_custom_name(self):
+ name = "custom42"
+ number = 42
+
+ @self.add(name=name)
+ @dataclass
+ class Dataclass:
+ number: int
+
+ testargs = f"test.py --{name}_number {number}".split()
+ with mock.patch("sys.argv", testargs):
+ dc = Dataclass()
+ self.assertEqual(number, dc.number)
|
Customize group name
Currently the decorator uses the class name as the group name. Add the ability to set a custom group name.
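A minimal sketch of the requested usage, mirroring the accompanying test; that `add` accepts a `name` keyword (and is importable from the package) is the assumption here:
```
# Sketch only: assumes `add` is exposed by the dataclass_cli package.
from dataclasses import dataclass

import dataclass_cli


@dataclass_cli.add(name="custom42")
@dataclass
class Settings:
    number: int


# CLI arguments are then prefixed with the custom group name:
#   python test.py --custom42_number 42
```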
|
0.0
|
77350a5c3ae2390a272e061a707d347e723cc977
|
[
"tests/test_dc_cli.py::TestDcCli::test_custom_name"
] |
[
"tests/test_dc_cli.py::TestDcCli::test_help_text",
"tests/test_dc_cli.py::TestDcCli::test_multiple_dataclass_parsing",
"tests/test_dc_cli.py::TestDcCli::test_no_default_value",
"tests/test_dc_cli.py::TestDcCli::test_no_possible_value_option",
"tests/test_dc_cli.py::TestDcCli::test_possible_value_option",
"tests/test_dc_cli.py::TestDcCli::test_single_dataclass_parsing"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-17 19:33:33+00:00
|
mit
| 3,692 |
|
manicmaniac__sqlalchemy-repr-7
|
diff --git a/sqlalchemy_repr.py b/sqlalchemy_repr.py
index 7d011b0..eb9ee8f 100644
--- a/sqlalchemy_repr.py
+++ b/sqlalchemy_repr.py
@@ -11,7 +11,6 @@ try:
except ImportError:
from repr import Repr as _Repr
-
__all__ = ['Repr', 'PrettyRepr', 'RepresentableBase',
'PrettyRepresentableBase']
@@ -46,8 +45,17 @@ class Repr(_Repr):
return '%s=%r' % (attr_name, attr_value)
def _iter_attrs(self, obj):
+ blacklist = set(getattr(obj, '__repr_blacklist__', set()))
+ whitelist = set(getattr(obj, '__repr_whitelist__', set()))
+
attr_names = inspect(obj.__class__).columns.keys()
for attr_name in attr_names:
+ if attr_name in blacklist:
+ continue
+
+ if whitelist and attr_name not in whitelist:
+ continue
+
yield (attr_name, getattr(obj, attr_name))
|
manicmaniac/sqlalchemy-repr
|
cbeb823d5f1953aa4f59617e32c1eca4033ff0f2
|
diff --git a/tests/test_sqlalchemy_repr.py b/tests/test_sqlalchemy_repr.py
index 069e3dd..8fbe8ed 100644
--- a/tests/test_sqlalchemy_repr.py
+++ b/tests/test_sqlalchemy_repr.py
@@ -28,6 +28,16 @@ class Entry(Base):
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
+class EntryWithBlacklistAndWhitelist(Base):
+ __tablename__ = 'entries_with_blacklist'
+ id = Column(Integer, primary_key=True)
+ title = Column(Unicode(255), nullable=False)
+ text = Column(UnicodeText, nullable=False, default='')
+ user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
+ __repr_blacklist__ = ('text',)
+ __repr_whitelist__ = ('text', 'title')
+
+
class TestRepr(unittest.TestCase):
def setUp(self):
engine = create_engine('sqlite://')
@@ -39,9 +49,11 @@ class TestRepr(unittest.TestCase):
self.session = Session()
self.entry = Entry(title='ham', text=self.dummy_text, user_id=1)
+ self.blacklist_entry = EntryWithBlacklistAndWhitelist(title='ham', text=self.dummy_text, user_id=1)
self.user = User(name='spam', created=self._date)
self.session.add(self.user)
self.session.add(self.entry)
+ self.session.add(self.blacklist_entry)
self.session.commit()
def test_repr_with_user(self):
@@ -72,6 +84,11 @@ class TestRepr(unittest.TestCase):
pattern = r"<Entry\n id=1,\n title=u?'ham',\n text=u?'Lorem.*',\n user_id=1>"
self.assertMatch(result, pattern)
+ def test_pretty_repr_with_blacklist_and_whitelist(self):
+ result = PrettyRepr().repr(self.blacklist_entry)
+ pattern = r"<EntryWithBlacklistAndWhitelist\n title='ham'>"
+ self.assertMatch(result, pattern)
+
def assertMatch(self, string, pattern):
if not re.match(pattern, string):
message = "%r doesn't match %r" % (string, pattern)
|
Allow for a whitelist/blacklist of fields
Some of my models have data columns that are much too verbose to print; it would be great if I could easily specify a whitelist/blacklist of fields that should be shown or hidden.
Perhaps something like this:
```
class SomeModel(Base):
...
repr_blacklist = {'field_a', 'field_b'}
```
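For comparison, a minimal sketch of the attribute names the accompanying patch actually reads (dunder tuples looked up with `getattr` at repr time):
```
from sqlalchemy import Column, Integer, Unicode, UnicodeText
# On older SQLAlchemy versions this import lives in sqlalchemy.ext.declarative.
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class Entry(Base):
    __tablename__ = 'entries'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    text = Column(UnicodeText, nullable=False, default='')

    # Columns listed here are skipped when building the repr.
    __repr_blacklist__ = ('text',)
    # When non-empty, only these columns are considered for the repr.
    __repr_whitelist__ = ('title',)
```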
|
0.0
|
cbeb823d5f1953aa4f59617e32c1eca4033ff0f2
|
[
"tests/test_sqlalchemy_repr.py::TestRepr::test_pretty_repr_with_blacklist_and_whitelist"
] |
[
"tests/test_sqlalchemy_repr.py::TestRepr::test_pretty_repr_with_entry",
"tests/test_sqlalchemy_repr.py::TestRepr::test_pretty_repr_with_user",
"tests/test_sqlalchemy_repr.py::TestRepr::test_repr_with_entry",
"tests/test_sqlalchemy_repr.py::TestRepr::test_repr_with_plain_python_object",
"tests/test_sqlalchemy_repr.py::TestRepr::test_repr_with_user"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-10-19 23:36:38+00:00
|
mit
| 3,693 |
|
mantidproject__mslice-848
|
diff --git a/mslice/models/colors.py b/mslice/models/colors.py
index c90dd0fb..fc88053d 100644
--- a/mslice/models/colors.py
+++ b/mslice/models/colors.py
@@ -18,6 +18,8 @@ the string cyan
"""
from __future__ import (absolute_import, division)
+import re
+
from matplotlib import rcParams
from six import iteritems
@@ -39,6 +41,7 @@ except ImportError:
_BASIC_COLORS_HEX_MAPPING = {'blue': '#1f77b4', 'orange': '#ff7f0e', 'green': '#2ca02c', 'red': '#d62728',
'purple': '#9467bd', 'brown': '#8c564b', 'pink': '#e377c2', 'gray': '#7f7f7f',
'olive': '#bcbd22', 'cyan': '#17becf', 'yellow': '#bfbf00', 'magenta': '#bf00bf'}
+HEX_COLOR_REGEX = re.compile(r'^#(?:[0-9a-fA-F]{3}){1,2}$')
def pretty_name(name):
@@ -75,16 +78,20 @@ def name_to_color(name):
Translate between our string names and the mpl color
representation
:param name: One of our known string names
- :return: The string identifier we have chosen
- :raises: ValueError if the color is not known
+ :return: The string identifier we have chosen, or a HEX code if an identifier does not exist
+ :raises: ValueError if the color is not known and is not a HEX code
"""
- try:
+ if name in _BASIC_COLORS_HEX_MAPPING:
return _BASIC_COLORS_HEX_MAPPING[name]
- except KeyError:
- try:
- return mpl_named_colors()[name]
- except KeyError:
- raise ValueError("Color name {} unknown".format(name))
+
+ mpl_colors = mpl_named_colors()
+ if name in mpl_colors:
+ return mpl_colors[name]
+
+ if re.match(HEX_COLOR_REGEX, name):
+ return name
+
+ raise ValueError(f"Unknown color name '{name}'")
def color_to_name(color):
@@ -92,16 +99,19 @@ def color_to_name(color):
Translate between a matplotlib color representation
and our string names.
:param color: Any matplotlib color representation
- :return: The string identifier we have chosen
- :raises: ValueError if the color is not known
+ :return: The string identifier we have chosen, or a HEX code if an identifier does not exist
+ :raises: ValueError if the color is not known and is not a HEX code
"""
color_as_hex = to_hex(color)
for name, hexvalue in iteritems(_BASIC_COLORS_HEX_MAPPING):
if color_as_hex == hexvalue:
return name
- else:
- for name, value in iteritems(mpl_named_colors()):
- if color_as_hex == to_hex(value):
- return pretty_name(name)
- else:
- raise ValueError("matplotlib color {} unknown".format(color))
+
+ for name, value in iteritems(mpl_named_colors()):
+ if color_as_hex == to_hex(value):
+ return pretty_name(name)
+
+ if re.match(HEX_COLOR_REGEX, color):
+ return color
+
+ raise ValueError(f"Unknown matplotlib color '{color}'")
|
mantidproject/mslice
|
20540deb709899eaef6a6dd7c1f8b4f4f59cebcc
|
diff --git a/mslice/tests/colors_test.py b/mslice/tests/colors_test.py
index 552a6797..7378e4a3 100644
--- a/mslice/tests/colors_test.py
+++ b/mslice/tests/colors_test.py
@@ -16,6 +16,9 @@ class ColorsTest(unittest.TestCase):
def test_known_color_name_gives_expected_hex(self):
self.assertEqual("#2ca02c", name_to_color("green"))
+ def test_unknown_color_returns_hex_if_it_is_a_hex_code(self):
+ self.assertEqual("#9933ff", name_to_color("#9933ff"))
+
def test_known_hex_gives_expected_color_name(self):
self.assertEqual("green", color_to_name("#2ca02c"))
@@ -29,6 +32,9 @@ class ColorsTest(unittest.TestCase):
self.assertEqual('cyan', color_to_name('#17becf'))
self.assertEqual('navy', color_to_name('#000080'))
+ def test_unknown_color_is_returned_if_it_is_a_hex_code(self):
+ self.assertEqual("#9933ff", color_to_name("#9933ff"))
+
def test_pretty_name(self):
self.assertEqual('blue', pretty_name('tab:blue'))
|
MSlice crashes upon opening plot options for a non-supported colour
**Describe the bug**
If a (probably old) script is used to generate a cut plot with a colour that we don't "support", `mslice` crashes when the plot options are opened.
**To Reproduce**
Steps to reproduce the behaviour:
1. Plot any cut
2. Generate a script to clipboard, paste it back into the `mslice` interface
3. Change the colour specified in the script to `#9933ff`
4. Run the script
5. Open plot options, observe error.
**Expected behaviour**
This error should be handled properly.
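A minimal sketch of the behaviour expected after a fix, using the function names from `mslice/models/colors.py`: unknown colours that are valid HEX codes should pass through instead of raising.
```
from mslice.models.colors import color_to_name, name_to_color

assert name_to_color("green") == "#2ca02c"    # known name resolves to its hex code
assert color_to_name("#9933ff") == "#9933ff"  # unsupported colour is passed through
```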
|
0.0
|
20540deb709899eaef6a6dd7c1f8b4f4f59cebcc
|
[
"mslice/tests/colors_test.py::ColorsTest::test_unknown_color_is_returned_if_it_is_a_hex_code",
"mslice/tests/colors_test.py::ColorsTest::test_unknown_color_returns_hex_if_it_is_a_hex_code"
] |
[
"mslice/tests/colors_test.py::ColorsTest::test_basic_color_is_known",
"mslice/tests/colors_test.py::ColorsTest::test_color_names_do_not_contain_prefixes",
"mslice/tests/colors_test.py::ColorsTest::test_colors_list_is_limited_in_size",
"mslice/tests/colors_test.py::ColorsTest::test_known_color_name_gives_expected_hex",
"mslice/tests/colors_test.py::ColorsTest::test_known_hex_gives_expected_color_name",
"mslice/tests/colors_test.py::ColorsTest::test_pretty_name",
"mslice/tests/colors_test.py::ColorsTest::test_unknown_color_name_raises_valueerror",
"mslice/tests/colors_test.py::ColorsTest::test_unknown_hex_color_raises_valueerror"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-06 12:36:52+00:00
|
bsd-3-clause
| 3,694 |
|
manubot__manubot-93
|
diff --git a/manubot/cite/doi.py b/manubot/cite/doi.py
index 928b849..0a7d0f2 100644
--- a/manubot/cite/doi.py
+++ b/manubot/cite/doi.py
@@ -1,3 +1,4 @@
+import json
import logging
import urllib.request
@@ -7,6 +8,38 @@ from manubot.cite.pubmed import get_pubmed_ids_for_doi
from manubot.util import get_manubot_user_agent
+def expand_short_doi(short_doi):
+ """
+ Convert a shortDOI to a regular DOI.
+ """
+ if not short_doi.startswith('10/'):
+ raise ValueError(f'shortDOIs start with `10/`, but expand_short_doi received: {short_doi}')
+ url = f'https://doi.org/api/handles/{short_doi.lower()}'
+ params = {
+ "type": "HS_ALIAS",
+ }
+ response = requests.get(url, params=params)
+ # response documentation at https://www.handle.net/proxy_servlet.html
+ results = response.json()
+ response_code = results.get('responseCode') # Handle protocol response code
+ if response_code == 100:
+ raise ValueError(f'Handle not found. Double check short_doi: {short_doi}')
+ if response_code == 200:
+ raise ValueError(f'HS_ALIAS values not found. Double check short_doi: {short_doi}')
+ if response_code != 1:
+ raise ValueError(f'Error response code of {response_code} returned by {response.url}')
+ values = results.get('values', [])
+ for value in values:
+ if value.get('type') == 'HS_ALIAS':
+ doi = value['data']['value']
+ return doi.lower()
+ raise RuntimeError(
+ f'HS_ALIAS value not found by expand_short_doi("{short_doi}")\n'
+ f'The following JSON was retrieved from {response.url}:\n'
+ + json.dumps(results, indent=2)
+ )
+
+
def get_short_doi_url(doi):
"""
Get the shortDOI URL for a DOI.
@@ -19,8 +52,7 @@ def get_short_doi_url(doi):
try:
response = requests.get(url, headers=headers).json()
short_doi = response['ShortDOI']
- short_doi = short_doi[3:] # Remove "10/" prefix
- short_url = 'https://doi.org/' + short_doi
+ short_url = 'https://doi.org/' + short_doi[3:] # Remove "10/" prefix
return short_url
except Exception:
logging.warning(f'shortDOI lookup failed for {doi}', exc_info=True)
diff --git a/manubot/cite/util.py b/manubot/cite/util.py
index 755d5f0..8c2005a 100644
--- a/manubot/cite/util.py
+++ b/manubot/cite/util.py
@@ -1,3 +1,4 @@
+import functools
import logging
import re
@@ -34,16 +35,33 @@ citation_pattern = re.compile(
r'(?<!\w)@[a-zA-Z0-9][\w:.#$%&\-+?<>~/]*[a-zA-Z0-9/]')
[email protected]_cache(maxsize=5_000)
def standardize_citation(citation):
"""
- Standardize citation idenfiers based on their source
+ Standardize citation identifiers based on their source
"""
source, identifier = citation.split(':', 1)
+
if source == 'doi':
+ if identifier.startswith('10/'):
+ from manubot.cite.doi import expand_short_doi
+ try:
+ identifier = expand_short_doi(identifier)
+ except Exception as error:
+ # If DOI shortening fails, return the unshortened DOI.
+ # DOI metadata lookup will eventually fail somewhere with
+ # appropriate error handling, as opposed to here.
+ logging.error(
+ f'Error in expand_short_doi for {identifier} '
+ f'due to a {error.__class__.__name__}:\n{error}'
+ )
+ logging.info(error, exc_info=True)
identifier = identifier.lower()
+
if source == 'isbn':
from isbnlib import to_isbn13
identifier = to_isbn13(identifier)
+
return f'{source}:{identifier}'
@@ -51,6 +69,7 @@ regexes = {
'pmid': re.compile(r'[1-9][0-9]{0,7}'),
'pmcid': re.compile(r'PMC[0-9]+'),
'doi': re.compile(r'10\.[0-9]{4,9}/\S+'),
+ 'shortdoi': re.compile(r'10/[a-zA-Z0-9]+'),
'wikidata': re.compile(r'Q[0-9]+'),
}
@@ -83,15 +102,23 @@ def inspect_citation_identifier(citation):
)
if source == 'doi':
- # https://www.crossref.org/blog/dois-and-matching-regular-expressions/
- if not identifier.startswith('10.'):
- return (
- 'DOIs must start with `10.`.'
- )
- elif not regexes['doi'].fullmatch(identifier):
+ if identifier.startswith('10.'):
+ # https://www.crossref.org/blog/dois-and-matching-regular-expressions/
+ if not regexes['doi'].fullmatch(identifier):
+ return (
+ 'Identifier does not conform to the DOI regex. '
+ 'Double check the DOI.'
+ )
+ elif identifier.startswith('10/'):
+ # shortDOI, see http://shortdoi.org
+ if not regexes['shortdoi'].fullmatch(identifier):
+ return (
+ 'Identifier does not conform to the shortDOI regex. '
+ 'Double check the shortDOI.'
+ )
+ else:
return (
- 'Identifier does not conform to the DOI regex. '
- 'Double check the DOI.'
+ 'DOIs must start with `10.` (or `10/` for shortDOIs).'
)
if source == 'isbn':
|
manubot/manubot
|
1b3ac66de9220e0b7496128a4a45e444b6d4edb1
|
diff --git a/manubot/cite/tests/test_doi.py b/manubot/cite/tests/test_doi.py
new file mode 100644
index 0000000..de7781c
--- /dev/null
+++ b/manubot/cite/tests/test_doi.py
@@ -0,0 +1,20 @@
+import pytest
+
+from manubot.cite.doi import (
+ expand_short_doi,
+)
+
+
+def test_expand_short_doi():
+ doi = expand_short_doi('10/b6vnmd')
+ assert doi == "10.1016/s0933-3657(96)00367-3"
+
+
+def test_expand_short_doi_invalid():
+ with pytest.raises(ValueError, match='Handle not found. Double check short_doi'):
+ expand_short_doi('10/b6vnmdxxxxxx')
+
+
+def test_expand_short_doi_not_short():
+ with pytest.raises(ValueError, match='shortDOIs start with `10/`'):
+ expand_short_doi('10.1016/S0933-3657(96)00367-3')
diff --git a/manubot/cite/tests/test_util.py b/manubot/cite/tests/test_util.py
index 700fb44..ab1da27 100644
--- a/manubot/cite/tests/test_util.py
+++ b/manubot/cite/tests/test_util.py
@@ -50,6 +50,9 @@ def test_get_citation_id(standard_citation, expected):
@pytest.mark.parametrize("citation,expected", [
('doi:10.5061/DRYAD.q447c/1', 'doi:10.5061/dryad.q447c/1'),
('doi:10.5061/dryad.q447c/1', 'doi:10.5061/dryad.q447c/1'),
+ ('doi:10/b6vnmd', 'doi:10.1016/s0933-3657(96)00367-3'),
+ ('doi:10/B6VNMD', 'doi:10.1016/s0933-3657(96)00367-3'),
+ ('doi:10/xxxxxxxxxxxxxYY', 'doi:10/xxxxxxxxxxxxxyy'), # passthrough non-existent shortDOI
('pmid:24159271', 'pmid:24159271'),
('isbn:1339919885', 'isbn:9781339919881'),
('isbn:1-339-91988-5', 'isbn:9781339919881'),
@@ -60,7 +63,7 @@ def test_get_citation_id(standard_citation, expected):
])
def test_standardize_citation(citation, expected):
"""
- Standardize idenfiers based on their source
+ Standardize identifiers based on their source
"""
output = standardize_citation(citation)
assert output == expected
@@ -68,6 +71,7 @@ def test_standardize_citation(citation, expected):
@pytest.mark.parametrize('citation', [
'doi:10.7717/peerj.705',
+ 'doi:10/b6vnmd',
'pmcid:PMC4304851',
'pmid:25648772',
'arxiv:1407.3561',
@@ -86,7 +90,9 @@ def test_inspect_citation_identifier_passes(citation):
@pytest.mark.parametrize(['citation', 'contains'], [
('doi:10.771/peerj.705', 'Double check the DOI'),
+ ('doi:10/b6v_nmd', 'Double check the shortDOI'),
('doi:7717/peerj.705', 'must start with `10.`'),
+ ('doi:b6vnmd', 'must start with `10.`'),
('pmcid:25648772', 'must start with `PMC`'),
('pmid:PMC4304851', 'Should pmid:PMC4304851 switch the citation source to `pmcid`?'),
('isbn:1-339-91988-X', 'identifier violates the ISBN syntax'),
diff --git a/manubot/process/tests/manuscripts/example/content/02.body.md b/manubot/process/tests/manuscripts/example/content/02.body.md
index e179734..7372fc4 100644
--- a/manubot/process/tests/manuscripts/example/content/02.body.md
+++ b/manubot/process/tests/manuscripts/example/content/02.body.md
@@ -10,6 +10,7 @@ The website, started in 2011, is run by Alexandra Elbakyan, a graduate student a
Elbakyan describes herself as motivated to provide universal access to knowledge [@url:https://engineuring.wordpress.com/2016/03/11/sci-hub-is-a-goal-changing-the-system-is-a-method/; @url:https://www.courtlistener.com/docket/4355308/50/elsevier-inc-v-sci-hub/; @url:http://www.leafscience.org/alexandra-elbakyan/].
Cite the same paper in three ways [@pmid:25648772; @pmcid:PMC4304851; @doi:10.7717/peerj.705].
+Here's a citation of the same paper using its shortDOI [@doi:10/98p], which should be grouped with the other DOI reference.
Cite using a `raw` citation @raw:model-free-data.
Let's cite the same citation but using a `tag`, i.e. @tag:model-free-data.
|
shortDOI citation support
Supporting citations of shortDOIs could be a nice feature. I see the major benefits as saving characters for excessively long DOIs and enabling a way to directly cite DOIs with forbidden characters such as `10.1016/S0933-3657(96)00367-3`. At least one user has attempted this workaround in https://github.com/Benjamin-Lee/deep-rules/pull/117/commits/c76ee4d20fa3f45935194b43b495b8159cbd30b4 / https://github.com/Benjamin-Lee/deep-rules/pull/117#issuecomment-456227977.
It seems like we could support a few different syntaxes:
1. `@doi:10/b6vnmd`
2. `@doi:b6vnmd`
3. `@shortdoi:b6vnmd`
4. `@shortdoi:10/b6vnmd`
I didn't see much shortDOI documentation online, but there is some provided when viewing a [shortened DOI result](http://web.archive.org/web/20190123160439/http://shortdoi.org/10.1016/S0933-3657(96)00367-3)
```
Your request was processed. The previously-created shortcut for 10.1016/S0933-3657(96)00367-3 is the handle:
10/b6vnmd
The shortcut HTTP URI is:
http://doi.org/b6vnmd
This shortcut will return the same results as http://dx.doi.org/10.1016/S0933-3657(96)00367-3, and doi:10/b6vnmd can be used in place of doi:10.1016/S0933-3657(96)00367-3.
```
Given the documentation, it seems that option 1 is the most canonical method. However, methods 3 & 4 could help avoid user confusion.
***
Note that DOI content negotiation for crossref DOIs appears to work:
```
curl --location --header "Accept: application/vnd.citationstyles.csl+json" https://doi.org/b6vnmd
```
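A minimal sketch of expanding a shortDOI via the handle API; the endpoint and the `HS_ALIAS` response type follow the documentation at https://www.handle.net/proxy_servlet.html:
```
import requests


def expand_short_doi(short_doi):
    """Resolve a shortDOI such as '10/b6vnmd' to its full DOI."""
    assert short_doi.startswith('10/'), 'shortDOIs start with `10/`'
    url = f'https://doi.org/api/handles/{short_doi.lower()}'
    results = requests.get(url, params={'type': 'HS_ALIAS'}).json()
    for value in results.get('values', []):
        if value.get('type') == 'HS_ALIAS':
            return value['data']['value'].lower()
    raise ValueError(f'could not expand {short_doi}')


expand_short_doi('10/b6vnmd')  # returns '10.1016/s0933-3657(96)00367-3'
```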
|
0.0
|
1b3ac66de9220e0b7496128a4a45e444b6d4edb1
|
[
"manubot/cite/tests/test_doi.py::test_expand_short_doi",
"manubot/cite/tests/test_doi.py::test_expand_short_doi_invalid",
"manubot/cite/tests/test_doi.py::test_expand_short_doi_not_short",
"manubot/cite/tests/test_util.py::test_citation_pattern_match[@doi:10.5061/dryad.q447c/1]",
"manubot/cite/tests/test_util.py::test_citation_pattern_match[@arxiv:1407.3561v1]",
"manubot/cite/tests/test_util.py::test_citation_pattern_match[@doi:10.1007/978-94-015-6859-3_4]",
"manubot/cite/tests/test_util.py::test_citation_pattern_match[@tag:tag_with_underscores]",
"manubot/cite/tests/test_util.py::test_citation_pattern_match[@tag:tag-with-hyphens]",
"manubot/cite/tests/test_util.py::test_citation_pattern_match[@url:https://greenelab.github.io/manubot-rootstock/]",
"manubot/cite/tests/test_util.py::test_citation_pattern_match[@tag:abc123]",
"manubot/cite/tests/test_util.py::test_citation_pattern_match[@tag:123abc]",
"manubot/cite/tests/test_util.py::test_citation_pattern_no_match[doi:10.5061/dryad.q447c/1]",
"manubot/cite/tests/test_util.py::test_citation_pattern_no_match[@tag:abc123-]",
"manubot/cite/tests/test_util.py::test_citation_pattern_no_match[@tag:abc123_]",
"manubot/cite/tests/test_util.py::test_citation_pattern_no_match[@-tag:abc123]",
"manubot/cite/tests/test_util.py::test_citation_pattern_no_match[@_tag:abc123]",
"manubot/cite/tests/test_util.py::test_get_citation_id[doi:10.5061/dryad.q447c/1-kQFQ8EaO]",
"manubot/cite/tests/test_util.py::test_get_citation_id[arxiv:1407.3561v1-16kozZ9Ys]",
"manubot/cite/tests/test_util.py::test_get_citation_id[pmid:24159271-11sli93ov]",
"manubot/cite/tests/test_util.py::test_get_citation_id[url:http://blog.dhimmel.com/irreproducible-timestamps/-QBWMEuxW]",
"manubot/cite/tests/test_util.py::test_standardize_citation[doi:10.5061/DRYAD.q447c/1-doi:10.5061/dryad.q447c/1]",
"manubot/cite/tests/test_util.py::test_standardize_citation[doi:10.5061/dryad.q447c/1-doi:10.5061/dryad.q447c/1]",
"manubot/cite/tests/test_util.py::test_standardize_citation[doi:10/b6vnmd-doi:10.1016/s0933-3657(96)00367-3]",
"manubot/cite/tests/test_util.py::test_standardize_citation[doi:10/B6VNMD-doi:10.1016/s0933-3657(96)00367-3]",
"manubot/cite/tests/test_util.py::test_standardize_citation[doi:10/xxxxxxxxxxxxxYY-doi:10/xxxxxxxxxxxxxyy]",
"manubot/cite/tests/test_util.py::test_standardize_citation[pmid:24159271-pmid:24159271]",
"manubot/cite/tests/test_util.py::test_standardize_citation[isbn:1339919885-isbn:9781339919881]",
"manubot/cite/tests/test_util.py::test_standardize_citation[isbn:1-339-91988-5-isbn:9781339919881]",
"manubot/cite/tests/test_util.py::test_standardize_citation[isbn:978-0-387-95069-3-isbn:9780387950693]",
"manubot/cite/tests/test_util.py::test_standardize_citation[isbn:9780387950938-isbn:9780387950938]",
"manubot/cite/tests/test_util.py::test_standardize_citation[isbn:1-55860-510-X-isbn:9781558605107]",
"manubot/cite/tests/test_util.py::test_standardize_citation[isbn:1-55860-510-x-isbn:9781558605107]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[doi:10.7717/peerj.705]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[doi:10/b6vnmd]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[pmcid:PMC4304851]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[pmid:25648772]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[arxiv:1407.3561]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[isbn:978-1-339-91988-1]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[isbn:1-339-91988-5]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[wikidata:Q1]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[wikidata:Q50051684]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_passes[url:https://peerj.com/articles/705/]",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[doi:10.771/peerj.705-Double",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[doi:10/b6v_nmd-Double",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[doi:7717/peerj.705-must",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[doi:b6vnmd-must",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[pmcid:25648772-must",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[pmid:PMC4304851-Should",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[isbn:1-339-91988-X-identifier",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[wikidata:P212-item",
"manubot/cite/tests/test_util.py::test_inspect_citation_identifier_fails[wikidata:QABCD-does",
"manubot/cite/tests/test_util.py::test_citation_to_citeproc_arxiv",
"manubot/cite/tests/test_util.py::test_citation_to_citeproc_isbn"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-01-23 22:45:15+00:00
|
bsd-3-clause
| 3,695 |
|
manuzhang__mkdocs-htmlproofer-plugin-75
|
diff --git a/README.md b/README.md
index 0eb0eb4..3ee32b6 100644
--- a/README.md
+++ b/README.md
@@ -28,12 +28,6 @@ plugins:
- htmlproofer
```
-To enable cross-page anchor validation, you must set `use_directory_urls = False` in `mkdocs.yml`:
-
-```yaml
-use_directory_urls: False
-```
-
## Configuring
### `enabled`
@@ -147,11 +141,11 @@ plugins:
## Compatibility with `attr_list` extension
-If you need to manually specify anchors make use of the `attr_list` [extension](https://python-markdown.github.io/extensions/attr_list) in the markdown.
+If you need to manually specify anchors make use of the `attr_list` [extension](https://python-markdown.github.io/extensions/attr_list) in the markdown.
This can be useful for multilingual documentation to keep anchors as language neutral permalinks in all languages.
* A sample for a heading `# Grüße {#greetings}` (the slugified generated anchor `Gre` is overwritten with `greetings`).
-* This also works for images `this is a nice image [](foo-bar.png){#nice-image}`
+* This also works for images `this is a nice image [](foo-bar.png){#nice-image}`
* And generall for paragraphs:
```markdown
Listing: This is noteworthy.
@@ -164,4 +158,4 @@ More information about plugins in the [MkDocs documentation](http://www.mkdocs.o
## Acknowledgement
-This work is based on the [mkdocs-markdownextradata-plugin](https://github.com/rosscdh/mkdocs-markdownextradata-plugin) project and the [Finding and Fixing Website Link Rot with Python, BeautifulSoup and Requests](https://www.twilio.com/en-us/blog/find-fix-website-link-rot-python-beautifulsoup-requests-html) article.
+This work is based on the [mkdocs-markdownextradata-plugin](https://github.com/rosscdh/mkdocs-markdownextradata-plugin) project and the [Finding and Fixing Website Link Rot with Python, BeautifulSoup and Requests](https://www.twilio.com/en-us/blog/find-fix-website-link-rot-python-beautifulsoup-requests-html) article.
diff --git a/htmlproofer/plugin.py b/htmlproofer/plugin.py
index 77fbbd2..59d62fa 100644
--- a/htmlproofer/plugin.py
+++ b/htmlproofer/plugin.py
@@ -98,14 +98,13 @@ class HtmlProoferPlugin(BasePlugin):
if not self.config['enabled']:
return
- use_directory_urls = config.data["use_directory_urls"]
-
# Optimization: At this point, we have all the files, so we can create
# a dictionary for faster lookups. Prior to this point, files are
# still being updated so creating a dictionary before now would result
# in incorrect values appearing as the key.
opt_files = {}
opt_files.update({os.path.normpath(file.url): file for file in self.files})
+ opt_files.update({os.path.normpath(file.src_uri): file for file in self.files})
# Optimization: only parse links and headings
# li, sup are used for footnotes
@@ -123,7 +122,7 @@ class HtmlProoferPlugin(BasePlugin):
if self.config['warn_on_ignored_urls']:
log_warning(f"ignoring URL {url} from {page.file.src_path}")
else:
- url_status = self.get_url_status(url, page.file.src_path, all_element_ids, opt_files, use_directory_urls)
+ url_status = self.get_url_status(url, page.file.src_path, all_element_ids, opt_files)
if self.bad_url(url_status) and self.is_error(self.config, url, url_status):
self.report_invalid_url(url, url_status, page.file.src_path)
@@ -161,8 +160,7 @@ class HtmlProoferPlugin(BasePlugin):
url: str,
src_path: str,
all_element_ids: Set[str],
- files: Dict[str, File],
- use_directory_urls: bool
+ files: Dict[str, File]
) -> int:
if any(pat.match(url) for pat in LOCAL_PATTERNS):
return 0
@@ -174,18 +172,13 @@ class HtmlProoferPlugin(BasePlugin):
return 0
if fragment and not path:
return 0 if url[1:] in all_element_ids else 404
- elif not use_directory_urls:
- # use_directory_urls = True injects too many challenges for locating the correct target
- # Markdown file, so disable target anchor validation in this case. Examples include:
- # ../..#BAD_ANCHOR style links to index.html and extra ../ inserted into relative
- # links.
+ else:
is_valid = self.is_url_target_valid(url, src_path, files)
url_status = 404
if not is_valid and self.is_error(self.config, url, url_status):
log_warning(f"Unable to locate source file for: {url}")
return url_status
return 0
- return 0
@staticmethod
def is_url_target_valid(url: str, src_path: str, files: Dict[str, File]) -> bool:
@@ -225,9 +218,14 @@ class HtmlProoferPlugin(BasePlugin):
# Convert root/site paths
search_path = os.path.normpath(url[1:])
else:
- # Handle relative links by concatenating the source dir with the destination path
- src_dir = urllib.parse.quote(str(pathlib.Path(src_path).parent), safe='/\\')
- search_path = os.path.normpath(str(pathlib.Path(src_dir) / pathlib.Path(url)))
+ # Handle relative links by looking up the destination url for the
+ # src_path and getting the parent directory.
+ try:
+ dest_uri = files[src_path].dest_uri
+ src_dir = urllib.parse.quote(str(pathlib.Path(dest_uri).parent), safe='/\\')
+ search_path = os.path.normpath(str(pathlib.Path(src_dir) / pathlib.Path(url)))
+ except KeyError:
+ return None
try:
return files[search_path]
|
manuzhang/mkdocs-htmlproofer-plugin
|
14c97ccfafe54caca261d739fd495d6d1e7c1a69
|
diff --git a/tests/integration/docs/nested/page1.md b/tests/integration/docs/nested/page1.md
index 338df37..b29e14e 100644
--- a/tests/integration/docs/nested/page1.md
+++ b/tests/integration/docs/nested/page1.md
@@ -8,7 +8,7 @@ you can either link to a local file without or with an anchor.
* [Main Page](../index.md)
* [Sub-Page](./page2.md)
* <figure markdown>
- <a href="../assets/hello-world.drawio.svg">
+ <a href="/assets/hello-world.drawio.svg">

</a>
</figure>
@@ -27,6 +27,6 @@ But allows valid anchors such as
## Image Link absolute/relative
-<a href="../assets/hello-world.drawio.svg"></a>
+<a href="/assets/hello-world.drawio.svg"></a>
<a href="/assets/hello-world.drawio.svg"></a>
diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py
index a17f2a6..2645779 100644
--- a/tests/unit/test_plugin.py
+++ b/tests/unit/test_plugin.py
@@ -115,7 +115,7 @@ def test_on_post_page__plugin_disabled():
),
)
def test_get_url_status__ignore_local_servers(plugin, empty_files, url):
- assert plugin.get_url_status(url, 'src/path.md', set(), empty_files, False) == 0
+ assert plugin.get_url_status(url, 'src/path.md', set(), empty_files) == 0
@pytest.mark.parametrize(
@@ -126,7 +126,7 @@ def test_get_url_status(validate_external: bool):
plugin.load_config({'validate_external_urls': validate_external})
def get_url():
- return plugin.get_url_status('https://google.com', 'src/path.md', set(), empty_files, False)
+ return plugin.get_url_status('https://google.com', 'src/path.md', set(), empty_files)
if validate_external:
with pytest.raises(Exception):
@@ -172,9 +172,9 @@ def test_contains_anchor(plugin, markdown, anchor, expected):
def test_get_url_status__same_page_anchor(plugin, empty_files):
- assert plugin.get_url_status('#ref', 'src/path.md', {'ref'}, empty_files, False) == 0
- assert plugin.get_url_status('##ref', 'src/path.md', {'ref'}, empty_files, False) == 404
- assert plugin.get_url_status('#ref', 'src/path.md', set(), empty_files, False) == 404
+ assert plugin.get_url_status('#ref', 'src/path.md', {'ref'}, empty_files) == 0
+ assert plugin.get_url_status('##ref', 'src/path.md', {'ref'}, empty_files) == 404
+ assert plugin.get_url_status('#ref', 'src/path.md', set(), empty_files) == 404
@pytest.mark.parametrize(
@@ -195,7 +195,7 @@ def test_get_url_status__external(plugin, empty_files, url):
with patch.object(HtmlProoferPlugin, "get_external_url") as mock_get_ext_url:
mock_get_ext_url.return_value = expected_status
- status = plugin.get_url_status(url, src_path, set(), empty_files, False)
+ status = plugin.get_url_status(url, src_path, set(), empty_files)
mock_get_ext_url.assert_called_once_with(url, scheme, src_path)
assert status == expected_status
@@ -241,39 +241,64 @@ def test_get_url_status__local_page(plugin):
index_page = Mock(spec=Page, markdown='# Heading\nContent')
page1_page = Mock(spec=Page, markdown='# Page One\n## Sub Heading\nContent')
special_char_page = Mock(spec=Page, markdown='# Heading éèà\n## Sub Heading éèà\nContent')
- files = {os.path.normpath(file.url): file for file in Files([
- Mock(spec=File, src_path='index.md', dest_path='index.html', url='index.html', page=index_page),
- Mock(spec=File, src_path='page1.md', dest_path='page1.html', url='page1.html', page=page1_page),
+ mock_files = Files([
+ Mock(spec=File, src_path='index.md', dest_path='index.html',
+ dest_uri='index.html', url='index.html', src_uri='index.md',
+ page=index_page),
+ Mock(spec=File, src_path='page1.md', dest_path='page1.html',
+ dest_uri='page1.html', url='page1.html', src_uri='page1.md',
+ page=page1_page),
Mock(spec=File, src_path='Dir éèà/éèà.md', dest_path='Dir éèà/éèà.html',
- url='Dir%20%C3%A9%C3%A8%C3%A0/%C3%A9%C3%A8%C3%A0.html', page=special_char_page),
- ])}
+ dest_uri='Dir éèà/éèà.html',
+ url='Dir%20%C3%A9%C3%A8%C3%A0/%C3%A9%C3%A8%C3%A0.html',
+ src_uri='Dir éèà/éèà.md', page=special_char_page),
+ Mock(spec=File, src_path='Dir éèà/page1.md', dest_path='Dir éèà/page1.html',
+ dest_uri='Dir éèà/page1.html',
+ url='Dir%20%C3%A9%C3%A8%C3%A0/page1.html',
+ src_uri='Dir%20%C3%A9%C3%A8%C3%A0/page1.md',
+ page=special_char_page),
+ ])
+ files = {}
+ files.update({os.path.normpath(file.url): file for file in mock_files})
+ files.update({file.src_uri: file for file in mock_files})
- assert plugin.get_url_status('index.html', 'page1.md', set(), files, False) == 0
- assert plugin.get_url_status('index.html#heading', 'page1.md', set(), files, False) == 0
- assert plugin.get_url_status('index.html#bad-heading', 'page1.md', set(), files, False) == 404
+ assert plugin.get_url_status('index.html', 'page1.md', set(), files) == 0
+ assert plugin.get_url_status('index.html#heading', 'page1.md', set(), files) == 0
+ assert plugin.get_url_status('index.html#bad-heading', 'page1.md', set(), files) == 404
- assert plugin.get_url_status('page1.html', 'page1.md', set(), files, False) == 0
- assert plugin.get_url_status('page1.html#sub-heading', 'page1.md', set(), files, False) == 0
- assert plugin.get_url_status('page1.html#heading', 'page1.md', set(), files, False) == 404
+ assert plugin.get_url_status('page1.html', 'page1.md', set(), files) == 0
+ assert plugin.get_url_status('page1.html#sub-heading', 'page1.md', set(), files) == 0
+ assert plugin.get_url_status('page1.html#heading', 'page1.md', set(), files) == 404
- assert plugin.get_url_status('page2.html', 'page1.md', set(), files, False) == 404
- assert plugin.get_url_status('page2.html#heading', 'page1.md', set(), files, False) == 404
+ assert plugin.get_url_status('page2.html', 'page1.md', set(), files) == 404
+ assert plugin.get_url_status('page2.html#heading', 'page1.md', set(), files) == 404
- assert plugin.get_url_status('Dir%20%C3%A9%C3%A8%C3%A0/%C3%A9%C3%A8%C3%A0.html#sub-heading-eea',
- 'page1.md', set(), files, False) == 0
- assert plugin.get_url_status('%C3%A9%C3%A8%C3%A0.html#sub-heading-eea', 'Dir éèà/page3.md', set(), files, False) == 0
+ assert plugin.get_url_status(
+ 'Dir%20%C3%A9%C3%A8%C3%A0/%C3%A9%C3%A8%C3%A0.html#sub-heading-eea',
+ 'page1.md', set(), files) == 0
+ assert plugin.get_url_status(
+ '%C3%A9%C3%A8%C3%A0.html#sub-heading-eea',
+ 'Dir%20%C3%A9%C3%A8%C3%A0/page1.md',
+ set(), files) == 0
def test_get_url_status__non_markdown_page(plugin):
index_page = Mock(spec=Page, markdown='# Heading\nContent')
- files = {os.path.normpath(file.url): file for file in Files([
- Mock(spec=File, src_path='index.md', dest_path='index.html', url='index.html', page=index_page),
- Mock(spec=File, src_path='drawing.svg', dest_path='drawing.svg', url='drawing.svg', page=None),
- ])}
+ mock_files = Files([
+ Mock(spec=File, src_path='index.md', dest_path='index.html',
+ dest_uri='index.html', url='index.html', src_uri='index.md',
+ page=index_page),
+ Mock(spec=File, src_path='drawing.svg', dest_path='drawing.svg',
+ dest_uri='index.html', url='drawing.svg', src_uri='drawing.svg',
+ page=None),
+ ])
+ files = {}
+ files.update({os.path.normpath(file.url): file for file in mock_files})
+ files.update({file.src_uri: file for file in mock_files})
- assert plugin.get_url_status('drawing.svg', 'index.md', set(), files, False) == 0
- assert plugin.get_url_status('/drawing.svg', 'index.md', set(), files, False) == 0
- assert plugin.get_url_status('not-existing.svg', 'index.md', set(), files, False) == 404
+ assert plugin.get_url_status('drawing.svg', 'index.md', set(), files) == 0
+ assert plugin.get_url_status('/drawing.svg', 'index.md', set(), files) == 0
+ assert plugin.get_url_status('not-existing.svg', 'index.md', set(), files) == 404
def test_get_url_status__local_page_nested(plugin):
@@ -282,48 +307,67 @@ def test_get_url_status__local_page_nested(plugin):
nested1_sibling_page = Mock(spec=Page, markdown='# Nested Sibling')
nested2_page = Mock(spec=Page, markdown='# Nested\n## Nested Two\nContent')
nested2_sibling_page = Mock(spec=Page, markdown='# Nested Sibling')
- files = {os.path.normpath(file.url): file for file in Files([
- Mock(spec=File, src_path='index.md', dest_path='index.html', url='index.html', page=index_page),
+ mock_files = Files([
+ Mock(
+ spec=File,
+ src_path='index.md',
+ dest_path='index.html',
+ dest_uri='index.html',
+ url='index.html',
+ src_uri='index.md',
+ page=index_page),
Mock(
spec=File,
src_path='foo/bar/nested.md',
dest_path='foo/bar/nested.html',
+ dest_uri='foo/bar/nested.html',
url='foo/bar/nested.html',
+ src_uri='foo/bar/nested.md',
page=nested1_page
),
Mock(
spec=File,
src_path='foo/bar/sibling.md',
dest_path='foo/bar/sibling.html',
+ dest_uri='foo/bar/sibling.html',
url='foo/bar/sibling.html',
+ src_uri='foo/bar/sibling.md',
page=nested1_sibling_page
),
Mock(
spec=File,
src_path='foo/baz/nested.md',
dest_path='foo/baz/nested.html',
+ dest_uri='foo/baz/nested.html',
url='foo/baz/nested.html',
+ src_uri='foo/baz/nested.md',
page=nested2_page
),
Mock(
spec=File,
src_path='foo/baz/sibling.md',
dest_path='foo/baz/sibling.html',
+ dest_uri='foo/baz/sibling.html',
url='foo/baz/sibling.html',
+ src_uri='foo/baz/sibling.md',
page=nested2_sibling_page
),
- ])}
+ ])
+
+ files = {}
+ files.update({os.path.normpath(file.url): file for file in mock_files})
+ files.update({file.src_uri: file for file in mock_files})
- assert plugin.get_url_status('nested.html#nested-one', 'foo/bar/sibling.md', set(), files, False) == 0
- assert plugin.get_url_status('nested.html#nested-two', 'foo/bar/sibling.md', set(), files, False) == 404
+ assert plugin.get_url_status('nested.html#nested-one', 'foo/bar/sibling.md', set(), files) == 0
+ assert plugin.get_url_status('nested.html#nested-two', 'foo/bar/sibling.md', set(), files) == 404
- assert plugin.get_url_status('nested.html#nested-two', 'foo/baz/sibling.md', set(), files, False) == 0
- assert plugin.get_url_status('nested.html#nested-one', 'foo/baz/sibling.md', set(), files, False) == 404
+ assert plugin.get_url_status('nested.html#nested-two', 'foo/baz/sibling.md', set(), files) == 0
+ assert plugin.get_url_status('nested.html#nested-one', 'foo/baz/sibling.md', set(), files) == 404
- assert plugin.get_url_status('foo/bar/nested.html#nested-one', 'index.md', set(), files, False) == 0
- assert plugin.get_url_status('foo/baz/nested.html#nested-two', 'index.md', set(), files, False) == 0
+ assert plugin.get_url_status('foo/bar/nested.html#nested-one', 'index.md', set(), files) == 0
+ assert plugin.get_url_status('foo/baz/nested.html#nested-two', 'index.md', set(), files) == 0
- assert plugin.get_url_status('/index.html', 'foo/baz/sibling.md', set(), files, False) == 0
+ assert plugin.get_url_status('/index.html', 'foo/baz/sibling.md', set(), files) == 0
@patch.object(htmlproofer.plugin, "log_warning", autospec=True)
@@ -334,7 +378,7 @@ def test_get_url_status__excluded_non_existing_relative_url__no_warning(log_warn
files = {}
plugin.config['raise_error_excludes'][url_status] = [url]
- status = plugin.get_url_status(url, src_path, set(), files, False)
+ status = plugin.get_url_status(url, src_path, set(), files)
log_warning_mock.assert_not_called()
assert 0 == status
@@ -349,12 +393,12 @@ def test_get_url_status__excluded_existing_relative_url__no_warning(log_warning_
existing_page = Mock(spec=Page, markdown='')
files = {
os.path.normpath(file.url): file for file in Files([
- Mock(spec=File, src_path=src_path, dest_path=url, url=url, page=existing_page)
+ Mock(spec=File, src_path=src_path, dest_path=url, dest_uri=url, url=url, src_uri=src_path, page=existing_page)
])
}
plugin.config['raise_error_excludes'][url_status] = [url]
- status = plugin.get_url_status(url, src_path, set(), files, False)
+ status = plugin.get_url_status(url, src_path, set(), files)
log_warning_mock.assert_not_called()
assert 0 == status
@@ -367,7 +411,7 @@ def test_get_url_status__non_existing_relative_url__warning_and_404(log_warning_
src_path = "index.md"
files = {}
- status = plugin.get_url_status(url, src_path, set(), files, False)
+ status = plugin.get_url_status(url, src_path, set(), files)
log_warning_mock.assert_called_once()
assert expected_url_status == status
|
disabling the implicit use_directory_urls renders the plugin useless for most users
**Describe the bug**
As the mkdocs default/implicit setting is `use_directory_urls = true` for good reasons, most people will use it, since it produces nice, human-friendly URLs without a useless html extension.
Still, this plugin fails to perform link testing for these users, which is likely >95% of mkdocs users. Exact numbers could easily be computed with a https://sourcegraph.com/ query that checks how many projects disable this setting.
Is there any chance to make the plugin work for those using directory urls?
|
0.0
|
14c97ccfafe54caca261d739fd495d6d1e7c1a69
|
[
"tests/unit/test_plugin.py::test_get_url_status__ignore_local_servers[http://localhost/]",
"tests/unit/test_plugin.py::test_get_url_status__ignore_local_servers[https://127.0.0.1/something]",
"tests/unit/test_plugin.py::test_get_url_status__ignore_local_servers[http://app_server/#foo]",
"tests/unit/test_plugin.py::test_get_url_status[False]",
"tests/unit/test_plugin.py::test_get_url_status__same_page_anchor",
"tests/unit/test_plugin.py::test_get_url_status__external[https://extwebsite.com]",
"tests/unit/test_plugin.py::test_get_url_status__external[http://extwebsite.com]",
"tests/unit/test_plugin.py::test_get_url_status__external[https://website.net/path#anchor]",
"tests/unit/test_plugin.py::test_get_url_status__external[mailto:[email protected]]",
"tests/unit/test_plugin.py::test_get_url_status__external[steam://application]",
"tests/unit/test_plugin.py::test_get_url_status__external[file://file]",
"tests/unit/test_plugin.py::test_get_url_status__local_page",
"tests/unit/test_plugin.py::test_get_url_status__non_markdown_page",
"tests/unit/test_plugin.py::test_get_url_status__local_page_nested",
"tests/unit/test_plugin.py::test_get_url_status__excluded_non_existing_relative_url__no_warning",
"tests/unit/test_plugin.py::test_get_url_status__excluded_existing_relative_url__no_warning",
"tests/unit/test_plugin.py::test_get_url_status__non_existing_relative_url__warning_and_404"
] |
[
"tests/unit/test_plugin.py::test_on_post_build[False-False]",
"tests/unit/test_plugin.py::test_on_post_build[False-True]",
"tests/unit/test_plugin.py::test_on_post_build[True-False]",
"tests/unit/test_plugin.py::test_on_post_build[True-True]",
"tests/unit/test_plugin.py::test_on_post_page[False-False-False]",
"tests/unit/test_plugin.py::test_on_post_page[False-False-True]",
"tests/unit/test_plugin.py::test_on_post_page[False-True-False]",
"tests/unit/test_plugin.py::test_on_post_page[False-True-True]",
"tests/unit/test_plugin.py::test_on_post_page[True-False-False]",
"tests/unit/test_plugin.py::test_on_post_page[True-False-True]",
"tests/unit/test_plugin.py::test_on_post_page[True-True-False]",
"tests/unit/test_plugin.py::test_on_post_page[True-True-True]",
"tests/unit/test_plugin.py::test_on_post_page__plugin_disabled",
"tests/unit/test_plugin.py::test_get_url_status[True]",
"tests/unit/test_plugin.py::test_contains_anchor[git",
"tests/unit/test_plugin.py::test_contains_anchor[##",
"tests/unit/test_plugin.py::test_contains_anchor[see",
"tests/unit/test_plugin.py::test_contains_anchor[paragraph",
"tests/unit/test_plugin.py::test_get_external_url__web_scheme[http]",
"tests/unit/test_plugin.py::test_get_external_url__web_scheme[https]",
"tests/unit/test_plugin.py::test_get_external_url__unknown_scheme[mailto]",
"tests/unit/test_plugin.py::test_get_external_url__unknown_scheme[file]",
"tests/unit/test_plugin.py::test_get_external_url__unknown_scheme[steam]",
"tests/unit/test_plugin.py::test_get_external_url__unknown_scheme[abc]",
"tests/unit/test_plugin.py::test_report_invalid_url__raise_error__highest_priority",
"tests/unit/test_plugin.py::test_report_invalid_url__raise_error__raises_and_no_log",
"tests/unit/test_plugin.py::test_report_invalid_url__raise_error_after_finish__log_error_is_called",
"tests/unit/test_plugin.py::test_report_invalid_url__not_raise_error__only_log_warning_is_called"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-26 21:23:36+00:00
|
mit
| 3,696 |
|
mapado__haversine-65
|
diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml
index 03a4139..42bc10b 100644
--- a/.github/workflows/python-app.yml
+++ b/.github/workflows/python-app.yml
@@ -3,7 +3,8 @@
name: Python application
-on:
+on:
+ workflow_dispatch: ~
push:
branches: [main]
pull_request:
diff --git a/README.md b/README.md
index a76b4c0..d267e98 100755
--- a/README.md
+++ b/README.md
@@ -88,7 +88,8 @@ inverse_haversine(paris, 10, Direction.SOUTH, unit=Unit.NAUTICAL_MILES)
### Performance optimisation for distances between all points in two vectors
-You will need to add [numpy](https://pypi.org/project/numpy/) in order to gain performance with vectors.
+You will need to install [numpy](https://pypi.org/project/numpy/) in order to gain performance with vectors.
+For optimal performance, you can turn off coordinate checking by adding `check=False` and install the optional packages [numba](https://pypi.org/project/numba/) and [icc_rt](https://pypi.org/project/icc_rt/).
You can then do this:
diff --git a/haversine/__init__.py b/haversine/__init__.py
index 806fe82..9643763 100755
--- a/haversine/__init__.py
+++ b/haversine/__init__.py
@@ -1,1 +1,1 @@
-from .haversine import Unit, haversine, haversine_vector, Direction, inverse_haversine
+from .haversine import Unit, haversine, haversine_vector, Direction, inverse_haversine, inverse_haversine_vector
diff --git a/haversine/haversine.py b/haversine/haversine.py
index 8140ef9..f780d0d 100755
--- a/haversine/haversine.py
+++ b/haversine/haversine.py
@@ -1,13 +1,14 @@
-from math import radians, cos, sin, asin, sqrt, degrees, pi, atan2
from enum import Enum
+from math import pi
from typing import Union, Tuple
+import math
# mean earth radius - https://en.wikipedia.org/wiki/Earth_radius#Mean_radius
_AVG_EARTH_RADIUS_KM = 6371.0088
-class Unit(Enum):
+class Unit(str, Enum):
"""
Enumeration of supported units.
The full list can be checked by iterating over the class; e.g.
@@ -24,7 +25,7 @@ class Unit(Enum):
DEGREES = 'deg'
-class Direction(Enum):
+class Direction(float, Enum):
"""
Enumeration of supported directions.
The full list can be checked by iterating over the class; e.g.
@@ -32,7 +33,7 @@ class Direction(Enum):
Angles expressed in radians.
"""
- NORTH = 0
+ NORTH = 0.0
NORTHEAST = pi * 0.25
EAST = pi * 0.5
SOUTHEAST = pi * 0.75
@@ -56,7 +57,6 @@ _CONVERSIONS = {
def get_avg_earth_radius(unit):
- unit = Unit(unit)
return _AVG_EARTH_RADIUS_KM * _CONVERSIONS[unit]
@@ -72,6 +72,19 @@ def _normalize(lat: float, lon: float) -> Tuple[float, float]:
return lat, lon
+def _normalize_vector(lat: "numpy.ndarray", lon: "numpy.ndarray") -> Tuple["numpy.ndarray", "numpy.ndarray"]:
+ """
+ Normalize points to [-90, 90] latitude and [-180, 180] longitude.
+ """
+ lat = (lat + 90) % 360 - 90
+ lon = (lon + 180) % 360 - 180
+ wrap = lat > 90
+ if numpy.any(wrap):
+ lat[wrap] = 180 - lat[wrap]
+ lon[wrap] = lon[wrap] % 360 - 180
+ return lat, lon
+
+
def _ensure_lat_lon(lat: float, lon: float):
"""
Ensure that the given latitude and longitude have proper values. An exception is raised if they are not.
@@ -82,7 +95,92 @@ def _ensure_lat_lon(lat: float, lon: float):
raise ValueError(f"Longitude {lon} is out of range [-180, 180]")
-def haversine(point1, point2, unit=Unit.KILOMETERS, normalize=False):
+def _ensure_lat_lon_vector(lat: "numpy.ndarray", lon: "numpy.ndarray"):
+ """
+ Ensure that the given latitude and longitude have proper values. An exception is raised if they are not.
+ """
+ if numpy.abs(lat).max() > 90:
+ raise ValueError("Latitude(s) out of range [-90, 90]")
+ if numpy.abs(lon).max() > 180:
+ raise ValueError("Longitude(s) out of range [-180, 180]")
+
+
+def _explode_args(f):
+ return lambda ops: f(**ops.__dict__)
+
+
+@_explode_args
+def _create_haversine_kernel(*, asin=None, arcsin=None, cos, radians, sin, sqrt, **_):
+ asin = asin or arcsin
+
+ def _haversine_kernel(lat1, lng1, lat2, lng2):
+ """
+ Compute the haversine distance on unit sphere. Inputs are in degrees,
+ either scalars (with ops==math) or arrays (with ops==numpy).
+ """
+ lat1 = radians(lat1)
+ lng1 = radians(lng1)
+ lat2 = radians(lat2)
+ lng2 = radians(lng2)
+ lat = lat2 - lat1
+ lng = lng2 - lng1
+ d = (sin(lat * 0.5) ** 2
+ + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2)
+ # Note: 2 * atan2(sqrt(d), sqrt(1-d)) is more accurate at
+ # large distance (d is close to 1), but also slower.
+ return 2 * asin(sqrt(d))
+ return _haversine_kernel
+
+
+
+@_explode_args
+def _create_inverse_haversine_kernel(*, asin=None, arcsin=None, atan2=None, arctan2=None, cos, degrees, radians, sin, sqrt, **_):
+ asin = asin or arcsin
+ atan2 = atan2 or arctan2
+
+ def _inverse_haversine_kernel(lat, lng, direction, d):
+ """
+ Compute the inverse haversine on unit sphere. lat/lng are in degrees,
+ direction in radians; all inputs are either scalars (with ops==math) or
+ arrays (with ops==numpy).
+ """
+ lat = radians(lat)
+ lng = radians(lng)
+ cos_d, sin_d = cos(d), sin(d)
+ cos_lat, sin_lat = cos(lat), sin(lat)
+ sin_d_cos_lat = sin_d * cos_lat
+ return_lat = asin(cos_d * sin_lat + sin_d_cos_lat * cos(direction))
+ return_lng = lng + atan2(sin(direction) * sin_d_cos_lat,
+ cos_d - sin_lat * sin(return_lat))
+ return degrees(return_lat), degrees(return_lng)
+ return _inverse_haversine_kernel
+
+
+_haversine_kernel = _create_haversine_kernel(math)
+_inverse_haversine_kernel = _create_inverse_haversine_kernel(math)
+
+try:
+ import numpy
+ has_numpy = True
+ _haversine_kernel_vector = _create_haversine_kernel(numpy)
+ _inverse_haversine_kernel_vector = _create_inverse_haversine_kernel(numpy)
+except ModuleNotFoundError:
+ # Import error will be reported in haversine_vector() / inverse_haversine_vector()
+ has_numpy = False
+
+try:
+ import numba # type: ignore
+ if has_numpy:
+ _haversine_kernel_vector = numba.vectorize(fastmath=True)(_haversine_kernel)
+ # Tuple output is not supported for numba.vectorize. Just jit the numpy version.
+ _inverse_haversine_kernel_vector = numba.njit(fastmath=True)(_inverse_haversine_kernel_vector)
+ _haversine_kernel = numba.njit(_haversine_kernel)
+ _inverse_haversine_kernel = numba.njit(_inverse_haversine_kernel)
+except ModuleNotFoundError:
+ pass
+
+
+def haversine(point1, point2, unit=Unit.KILOMETERS, normalize=False, check=True):
""" Calculate the great-circle distance between two points on the Earth surface.
Takes two 2-tuples, containing the latitude and longitude of each point in decimal degrees,
@@ -94,6 +192,7 @@ def haversine(point1, point2, unit=Unit.KILOMETERS, normalize=False):
initials of its corresponding unit of measurement (i.e. miles = mi)
default 'km' (kilometers).
:param normalize: if True, normalize the points to [-90, 90] latitude and [-180, 180] longitude.
+ :param check: if True, check that points are normalized.
Example: ``haversine((45.7597, 4.8422), (48.8567, 2.3508), unit=Unit.METERS)``
@@ -115,25 +214,14 @@ def haversine(point1, point2, unit=Unit.KILOMETERS, normalize=False):
if normalize:
lat1, lng1 = _normalize(lat1, lng1)
lat2, lng2 = _normalize(lat2, lng2)
- else:
+ elif check:
_ensure_lat_lon(lat1, lng1)
_ensure_lat_lon(lat2, lng2)
- # convert all latitudes/longitudes from decimal degrees to radians
- lat1 = radians(lat1)
- lng1 = radians(lng1)
- lat2 = radians(lat2)
- lng2 = radians(lng2)
-
- # calculate haversine
- lat = lat2 - lat1
- lng = lng2 - lng1
- d = sin(lat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2
+ return get_avg_earth_radius(unit) * _haversine_kernel(lat1, lng1, lat2, lng2)
- return 2 * get_avg_earth_radius(unit) * asin(sqrt(d))
-
-def haversine_vector(array1, array2, unit=Unit.KILOMETERS, comb=False, normalize=False):
+def haversine_vector(array1, array2, unit=Unit.KILOMETERS, comb=False, normalize=False, check=True):
'''
The exact same function as "haversine", except that this
version replaces math functions with numpy functions.
@@ -141,11 +229,9 @@ def haversine_vector(array1, array2, unit=Unit.KILOMETERS, comb=False, normalize
distance between two points, but is much faster for computing
the distance between two vectors of points due to vectorization.
'''
- try:
- import numpy
- except ModuleNotFoundError:
- return 'Error, unable to import Numpy,\
- consider using haversine instead of haversine_vector.'
+ if not has_numpy:
+ raise RuntimeError('Error, unable to import Numpy, '
+ 'consider using haversine instead of haversine_vector.')
# ensure arrays are numpy ndarrays
if not isinstance(array1, numpy.ndarray):
@@ -165,23 +251,17 @@ def haversine_vector(array1, array2, unit=Unit.KILOMETERS, comb=False, normalize
raise IndexError(
"When not in combination mode, arrays must be of same size. If mode is required, use comb=True as argument.")
- # normalize points or ensure they are proper lat/lon, i.e., in [-90, 90] and [-180, 180]
- if normalize:
- array1 = numpy.array([_normalize(p[0], p[1]) for p in array1])
- array2 = numpy.array([_normalize(p[0], p[1]) for p in array2])
- else:
- [_ensure_lat_lon(p[0], p[1]) for p in array1]
- [_ensure_lat_lon(p[0], p[1]) for p in array2]
-
# unpack latitude/longitude
lat1, lng1 = array1[:, 0], array1[:, 1]
lat2, lng2 = array2[:, 0], array2[:, 1]
- # convert all latitudes/longitudes from decimal degrees to radians
- lat1 = numpy.radians(lat1)
- lng1 = numpy.radians(lng1)
- lat2 = numpy.radians(lat2)
- lng2 = numpy.radians(lng2)
+ # normalize points or ensure they are proper lat/lon, i.e., in [-90, 90] and [-180, 180]
+ if normalize:
+ lat1, lng1 = _normalize_vector(lat1, lng1)
+ lat2, lng2 = _normalize_vector(lat2, lng2)
+ elif check:
+ _ensure_lat_lon_vector(lat1, lng1)
+ _ensure_lat_lon_vector(lat2, lng2)
# If in combination mode, turn coordinates of array1 into column vectors for broadcasting
if comb:
@@ -190,27 +270,33 @@ def haversine_vector(array1, array2, unit=Unit.KILOMETERS, comb=False, normalize
lat2 = numpy.expand_dims(lat2, axis=1)
lng2 = numpy.expand_dims(lng2, axis=1)
- # calculate haversine
- lat = lat2 - lat1
- lng = lng2 - lng1
- d = (numpy.sin(lat * 0.5) ** 2
- + numpy.cos(lat1) * numpy.cos(lat2) * numpy.sin(lng * 0.5) ** 2)
-
- return 2 * get_avg_earth_radius(unit) * numpy.arcsin(numpy.sqrt(d))
+ return get_avg_earth_radius(unit) * _haversine_kernel_vector(lat1, lng1, lat2, lng2)
def inverse_haversine(point, distance, direction: Union[Direction, float], unit=Unit.KILOMETERS):
-
lat, lng = point
- lat, lng = map(radians, (lat, lng))
- d = distance
r = get_avg_earth_radius(unit)
- brng = direction.value if isinstance(direction, Direction) else direction
+ return _inverse_haversine_kernel(lat, lng, direction, distance/r)
+
+
+def inverse_haversine_vector(array, distance, direction, unit=Unit.KILOMETERS):
+ if not has_numpy:
+ raise RuntimeError('Error, unable to import Numpy, '
+ 'consider using inverse_haversine instead of inverse_haversine_vector.')
- return_lat = asin(sin(lat) * cos(d / r) + cos(lat)
- * sin(d / r) * cos(brng))
- return_lng = lng + atan2(sin(brng) * sin(d / r) *
- cos(lat), cos(d / r) - sin(lat) * sin(return_lat))
+ # ensure arrays are numpy ndarrays
+ array, distance, direction = map(numpy.asarray, (array, distance, direction))
+
+ # ensure will be able to iterate over rows by adding dimension if needed
+ if array.ndim == 1:
+ array = numpy.expand_dims(array, 0)
+
+ # Asserts that arrays are correctly sized
+ if array.ndim != 2 or array.shape[1] != 2 or array.shape[0] != len(distance) or array.shape[0] != len(direction):
+ raise IndexError("Arrays must be of same size.")
- return_lat, return_lng = map(degrees, (return_lat, return_lng))
- return return_lat, return_lng
+ # unpack latitude/longitude
+ lat, lng = array[:, 0], array[:, 1]
+
+ r = get_avg_earth_radius(unit)
+ return _inverse_haversine_kernel_vector(lat, lng, direction, distance/r)
|
mapado/haversine
|
93275692c6b47e183b1ad8e00a0300b2c37bb5f9
|
diff --git a/tests/test_inverse_haversine_vector.py b/tests/test_inverse_haversine_vector.py
new file mode 100644
index 0000000..4e3dd90
--- /dev/null
+++ b/tests/test_inverse_haversine_vector.py
@@ -0,0 +1,23 @@
+from haversine import inverse_haversine_vector, Unit, Direction
+from numpy import isclose
+from math import pi
+import pytest
+
+from tests.geo_ressources import LYON, PARIS, NEW_YORK, LONDON
+
+
[email protected](
+ "point, dir, dist, result",
+ [
+ (PARIS, Direction.NORTH, 32, (49.144444, 2.3508)),
+ (PARIS, 0, 32, (49.144444, 2.3508)),
+ (LONDON, Direction.WEST, 50, (51.507778, -0.840556)),
+ (LONDON, pi * 1.5, 50, (51.507778, -0.840556)),
+ (NEW_YORK, Direction.SOUTH, 15, (40.568611, -74.235278)),
+ (NEW_YORK, Direction.NORTHWEST, 50, (41.020556, -74.656667)),
+ (NEW_YORK, pi * 1.25, 50, (40.384722, -74.6525)),
+ ],
+)
+def test_inverse_kilometers(point, dir, dist, result):
+ assert isclose(inverse_haversine_vector([point], [dist], [dir]),
+ ([result[0]], [result[1]]), rtol=1e-5).all()
|
haversine_vector is no longer fast
Following commit b949e32246976f14f192c97c9bb0b82b8e1ee451, which introduces an element-wise limits check, the vectorized version is 10 times or more slower than it used to be.
There should probably be a vectorized version of the check, or a flag to indicate that the limits check is not needed.
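For illustration, a whole-array check along the lines of the `_ensure_lat_lon_vector` helper introduced in the patch above replaces the per-point Python loop with two NumPy reductions (the function and variable names here are just a sketch):
```python
import numpy as np

def ensure_lat_lon_vector(lat: np.ndarray, lon: np.ndarray) -> None:
    # Two array-wide reductions instead of one Python-level call per point.
    if np.abs(lat).max() > 90:
        raise ValueError("Latitude(s) out of range [-90, 90]")
    if np.abs(lon).max() > 180:
        raise ValueError("Longitude(s) out of range [-180, 180]")

# Checking a million points costs two vectorized passes, not a million calls.
lat = np.random.uniform(-90, 90, 1_000_000)
lon = np.random.uniform(-180, 180, 1_000_000)
ensure_lat_lon_vector(lat, lon)
```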
|
0.0
|
93275692c6b47e183b1ad8e00a0300b2c37bb5f9
|
[
"tests/test_inverse_haversine_vector.py::test_inverse_kilometers[point0-Direction.NORTH-32-result0]",
"tests/test_inverse_haversine_vector.py::test_inverse_kilometers[point5-Direction.NORTHWEST-50-result5]",
"tests/test_inverse_haversine_vector.py::test_inverse_kilometers[point3-4.71238898038469-50-result3]",
"tests/test_inverse_haversine_vector.py::test_inverse_kilometers[point1-0-32-result1]",
"tests/test_inverse_haversine_vector.py::test_inverse_kilometers[point6-3.9269908169872414-50-result6]",
"tests/test_inverse_haversine_vector.py::test_inverse_kilometers[point2-Direction.WEST-50-result2]",
"tests/test_inverse_haversine_vector.py::test_inverse_kilometers[point4-Direction.SOUTH-15-result4]"
] |
[] |
{
"failed_lite_validators": [
"has_git_commit_hash",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-11 15:18:14+00:00
|
mit
| 3,697 |
|
mapbox__COGDumper-5
|
diff --git a/cogdumper/cog_tiles.py b/cogdumper/cog_tiles.py
index 0acc0c7..318bb1b 100644
--- a/cogdumper/cog_tiles.py
+++ b/cogdumper/cog_tiles.py
@@ -1,5 +1,7 @@
"""Function for extracting tiff tiles."""
+import os
+
from abc import abstractmethod
from math import ceil
import struct
@@ -41,16 +43,18 @@ class COGTiff:
reader:
A reader that implements the cogdumper.cog_tiles.AbstractReader methods
"""
- self._init = False
self._endian = '<'
self._version = 42
self.read = reader
self._big_tiff = False
+ self.header = ''
self._offset = 0
self._image_ifds = []
self._mask_ifds = []
- def ifds(self):
+ self.read_header()
+
+ def _ifds(self):
"""Reads TIFF image file directories from a COG recursively.
Parameters
-----------
@@ -68,10 +72,24 @@ class COGTiff:
next_offset = 0
pos = 0
tags = []
+
+ fallback_size = 4096 if self._big_tiff else 1024
+ if self._offset > len(self.header):
+ byte_starts = len(self.header)
+ byte_ends = byte_starts + self._offset + fallback_size
+ self.header += self.read(byte_starts, byte_ends)
+
if self._big_tiff:
- bytes = self.read(self._offset, 8)
+ bytes = self.header[self._offset: self._offset + 8]
num_tags = struct.unpack(f'{self._endian}Q', bytes)[0]
- bytes = self.read(self._offset + 8, (num_tags * 20) + 8)
+
+ byte_starts = self._offset + 8
+ byte_ends = (num_tags * 20) + 8 + byte_starts
+ if byte_ends > len(self.header):
+ s = len(self.header)
+ self.header += self.read(s, byte_ends)
+
+ bytes = self.header[byte_starts: byte_ends]
for t in range(0, num_tags):
code = struct.unpack(
@@ -100,7 +118,14 @@ class COGTiff:
f'{self._endian}Q',
bytes[pos + 12: pos + 20]
)[0]
- data = self.read(data_offset, tag_len)
+
+ byte_starts = data_offset
+ byte_ends = byte_starts + tag_len
+ if byte_ends > len(self.header):
+ s = len(self.header)
+ self.header += self.read(s, byte_ends)
+
+ data = self.header[byte_starts: byte_ends]
tags.append(
{
@@ -116,12 +141,20 @@ class COGTiff:
self._offset = self._offset + 8 + pos
next_offset = struct.unpack(
f'{self._endian}Q',
- self.read(self._offset, 8)
+ self.header[self._offset: self._offset + 8]
)[0]
else:
- bytes = self.read(self._offset, 2)
+ bytes = self.header[self._offset: self._offset + 2]
num_tags = struct.unpack(f'{self._endian}H', bytes)[0]
- bytes = self.read(self._offset + 2, (num_tags * 12) + 2)
+
+ byte_starts = self._offset + 2
+ byte_ends = (num_tags * 12) + 2 + byte_starts
+ if byte_ends > len(self.header):
+ s = len(self.header)
+ self.header += self.read(s, byte_ends)
+
+ bytes = self.header[byte_starts: byte_ends]
+
for t in range(0, num_tags):
code = struct.unpack(
f'{self._endian}H',
@@ -149,7 +182,13 @@ class COGTiff:
f'{self._endian}L',
bytes[pos + 8: pos + 12]
)[0]
- data = self.read(data_offset, tag_len)
+
+ byte_starts = data_offset
+ byte_ends = byte_starts + tag_len
+ if byte_ends > len(self.header):
+ s = len(self.header)
+ self.header += self.read(s, byte_ends)
+ data = self.header[byte_starts: byte_ends]
tags.append(
{
@@ -165,7 +204,7 @@ class COGTiff:
self._offset = self._offset + 2 + pos
next_offset = struct.unpack(
f'{self._endian}L',
- self.read(self._offset, 4)
+ self.header[self._offset: self._offset + 4]
)[0]
self._offset = next_offset
@@ -176,22 +215,25 @@ class COGTiff:
}
def read_header(self):
+ """Read and parse COG header."""
+ buff_size = int(os.environ.get('COG_INGESTED_BYTES_AT_OPEN', '16384'))
+ self.header = self.read(0, buff_size)
+
# read first 4 bytes to determine tiff or bigtiff and byte order
- bytes = self.read(0, 4)
- if bytes[:2] == b'MM':
+ if self.header[:2] == b'MM':
self._endian = '>'
- self._version = struct.unpack(f'{self._endian}H', bytes[2:4])[0]
+ self._version = struct.unpack(f'{self._endian}H', self.header[2:4])[0]
if self._version == 42:
# TIFF
self._big_tiff = False
# read offset to first IFD
- self._offset = struct.unpack(f'{self._endian}L', self.read(4, 4))[0]
+ self._offset = struct.unpack(f'{self._endian}L', self.header[4:8])[0]
elif self._version == 43:
# BIGTIFF
self._big_tiff = True
- bytes = self.read(4, 12)
+ bytes = self.header[4:16]
bytesize = struct.unpack(f'{self._endian}H', bytes[0:2])[0]
w = struct.unpack(f'{self._endian}H', bytes[2:4])[0]
self._offset = struct.unpack(f'{self._endian}Q', bytes[4:])[0]
@@ -203,7 +245,7 @@ class COGTiff:
self._init = True
# for JPEG we need to read all IFDs, they are at the front of the file
- for ifd in self.ifds():
+ for ifd in self._ifds():
mime_type = 'image/jpeg'
# tile offsets are an extension but if they aren't in the file then
# you can't get a tile back!
@@ -293,9 +335,7 @@ class COGTiff:
self._mask_ifds = []
def get_tile(self, x, y, z):
- if self._init is False:
- self.read_header()
-
+ """Read tile data."""
if z < len(self._image_ifds):
image_ifd = self._image_ifds[z]
idx = (y * image_ifd['ny_tiles']) + x
@@ -326,6 +366,4 @@ class COGTiff:
@property
def version(self):
- if self._init is False:
- self.read_header()
return self._version
diff --git a/cogdumper/filedumper.py b/cogdumper/filedumper.py
index f1454dd..27596a6 100644
--- a/cogdumper/filedumper.py
+++ b/cogdumper/filedumper.py
@@ -1,7 +1,10 @@
"""A utility to dump tiles directly from a local tiff file."""
+import logging
from cogdumper.cog_tiles import AbstractReader
+logger = logging.getLogger(__name__)
+
class Reader(AbstractReader):
"""Wraps the remote COG."""
@@ -10,5 +13,8 @@ class Reader(AbstractReader):
self._handle = handle
def read(self, offset, length):
+ start = offset
+ stop = offset + length - 1
+ logger.info(f'Reading bytes: {start} to {stop}')
self._handle.seek(offset)
return self._handle.read(length)
diff --git a/cogdumper/httpdumper.py b/cogdumper/httpdumper.py
index d76f225..8ea2a1d 100644
--- a/cogdumper/httpdumper.py
+++ b/cogdumper/httpdumper.py
@@ -1,11 +1,15 @@
"""A utility to dump tiles directly from a tiff file on a http server."""
+import logging
+
import requests
from requests.auth import HTTPBasicAuth
from cogdumper.errors import TIFFError
from cogdumper.cog_tiles import AbstractReader
+logger = logging.getLogger(__name__)
+
class Reader(AbstractReader):
"""Wraps the remote COG."""
@@ -37,6 +41,7 @@ class Reader(AbstractReader):
def read(self, offset, length):
start = offset
stop = offset + length - 1
+ logger.info(f'Reading bytes: {start} to {stop}')
headers = {'Range': f'bytes={start}-{stop}'}
r = self.session.get(self.url, auth=self.auth, headers=headers)
if r.status_code != requests.codes.partial_content:
diff --git a/cogdumper/s3dumper.py b/cogdumper/s3dumper.py
index ce60f6e..9b66652 100644
--- a/cogdumper/s3dumper.py
+++ b/cogdumper/s3dumper.py
@@ -1,11 +1,14 @@
"""A utility to dump tiles directly from a tiff file in an S3 bucket."""
import os
+import logging
import boto3
from cogdumper.cog_tiles import AbstractReader
+logger = logging.getLogger(__name__)
+
region = os.environ.get('AWS_REGION', 'us-east-1')
s3 = boto3.resource('s3', region_name=region)
@@ -14,12 +17,15 @@ class Reader(AbstractReader):
"""Wraps the remote COG."""
def __init__(self, bucket_name, key):
+ """Init reader object."""
self.bucket = bucket_name
self.key = key
+ self.source = s3.Object(self.bucket, self.key)
def read(self, offset, length):
+ """Read method."""
start = offset
stop = offset + length - 1
- r = s3.meta.client.get_object(Bucket=self.bucket, Key=self.key,
- Range=f'bytes={start}-{stop}')
+ logger.info(f'Reading bytes: {start} to {stop}')
+ r = self.source.get(Range=f'bytes={start}-{stop}')
return r['Body'].read()
diff --git a/cogdumper/scripts/cli.py b/cogdumper/scripts/cli.py
index 5fdccb3..bd366af 100644
--- a/cogdumper/scripts/cli.py
+++ b/cogdumper/scripts/cli.py
@@ -1,5 +1,5 @@
"""cli."""
-
+import logging
import mimetypes
import click
@@ -25,8 +25,13 @@ def cogdumper():
help='local output directory')
@click.option('--xyz', type=click.INT, default=[0, 0, 0], nargs=3,
help='xyz tile coordinates where z is the overview level')
-def s3(bucket, key, output, xyz):
[email protected]('--verbose', '-v', is_flag=True, help='Show logs')
[email protected]_option(version=cogdumper_version, message='%(version)s')
+def s3(bucket, key, output, xyz, verbose):
"""Read AWS S3 hosted dataset."""
+ if verbose:
+ logging.basicConfig(level=logging.INFO)
+
reader = S3Reader(bucket, key)
cog = COGTiff(reader.read)
mime_type, tile = cog.get_tile(*xyz)
@@ -50,9 +55,13 @@ def s3(bucket, key, output, xyz):
help='local output directory')
@click.option('--xyz', type=click.INT, default=[0, 0, 0], nargs=3,
help='xyz tile coordinates where z is the overview level')
[email protected]('--verbose', '-v', is_flag=True, help='Show logs')
@click.version_option(version=cogdumper_version, message='%(version)s')
-def http(server, path, resource, output, xyz=None):
+def http(server, path, resource, output, xyz, verbose):
"""Read web hosted dataset."""
+ if verbose:
+ logging.basicConfig(level=logging.INFO)
+
reader = HTTPReader(server, path, resource)
cog = COGTiff(reader.read)
mime_type, tile = cog.get_tile(*xyz)
@@ -74,9 +83,13 @@ def http(server, path, resource, output, xyz=None):
help='local output directory')
@click.option('--xyz', type=click.INT, default=[0, 0, 0], nargs=3,
help='xyz tile coordinate where z is the overview level')
[email protected]('--verbose', '-v', is_flag=True, help='Show logs')
@click.version_option(version=cogdumper_version, message='%(version)s')
-def file(file, output, xyz=None):
+def file(file, output, xyz, verbose):
"""Read local dataset."""
+ if verbose:
+ logging.basicConfig(level=logging.INFO)
+
with open(file, 'rb') as src:
reader = FileReader(src)
cog = COGTiff(reader.read)
|
mapbox/COGDumper
|
eb6cbcfbdbc94ee8fd75450908b375fac93e3989
|
diff --git a/tests/test_filedumper.py b/tests/test_filedumper.py
index 2ba8e1c..0bb2b8c 100644
--- a/tests/test_filedumper.py
+++ b/tests/test_filedumper.py
@@ -66,7 +66,6 @@ def test_tiff_ifds(tiff):
reader = FileReader(tiff)
cog = COGTiff(reader.read)
# read private variable directly for testing
- cog.read_header()
assert len(cog._image_ifds) > 0
assert 8 == len(cog._image_ifds[0]['tags'])
assert 0 == cog._image_ifds[4]['next_offset']
@@ -76,7 +75,6 @@ def test_be_tiff_ifds(be_tiff):
reader = FileReader(be_tiff)
cog = COGTiff(reader.read)
# read private variable directly for testing
- cog.read_header()
assert len(cog._image_ifds) > 0
assert 8 == len(cog._image_ifds[0]['tags'])
assert 0 == cog._image_ifds[4]['next_offset']
@@ -86,7 +84,6 @@ def test_bigtiff_ifds(bigtiff):
reader = FileReader(bigtiff)
cog = COGTiff(reader.read)
# read private variable directly for testing
- cog.read_header()
assert len(cog._image_ifds) > 0
assert 7 == len(cog._image_ifds[0]['tags'])
assert 0 == cog._image_ifds[4]['next_offset']
@@ -102,6 +99,19 @@ def test_tiff_tile(tiff):
assert 73 == len(cog._image_ifds[0]['jpeg_tables'])
assert mime_type == 'image/jpeg'
+
+def test_tiff_tile_env(tiff, monkeypatch):
+ monkeypatch.setenv("COG_INGESTED_BYTES_AT_OPEN", "1024")
+ reader = FileReader(tiff)
+ cog = COGTiff(reader.read)
+ mime_type, tile = cog.get_tile(0, 0, 0)
+ assert 1 == len(cog._image_ifds[0]['offsets'])
+ assert 1 == len(cog._image_ifds[0]['byte_counts'])
+ assert 'jpeg_tables' in cog._image_ifds[0]
+ assert 73 == len(cog._image_ifds[0]['jpeg_tables'])
+ assert mime_type == 'image/jpeg'
+
+
def test_bad_tiff_tile(tiff):
reader = FileReader(tiff)
cog = COGTiff(reader.read)
|
Implement chunk or stream read to reduce the number of `read` calls
Right now, when reading the header, we have to loop through each IFD to get that IFD's metadata.
https://github.com/mapbox/COGDumper/blob/dfc6b9b56879c7116f522518ed37617d570acba1/cogdumper/cog_tiles.py#L221-L222
While this lets us read only the parts we need to determine the offsets of the data and mask sections, it is not efficient, resulting in tens of small `read` calls.
```
cogdumper s3 --bucket mapbox --key playground/vincent/y.tif --xyz 0 0 0
Read Header
read 0 3
read 4 7
read 8 9
read 10 251
read 516 771
read 260 515
read 826 967
read 250 253
Read IFD
read 1378 1379
read 1380 1549
read 1808 2063
read 1552 1807
read 1548 1551
Read IFD
read 2064 2065
read 2066 2259
read 2332 2395
read 2268 2331
read 2450 2591
read 2258 2261
Read IFD
read 2592 2593
read 2594 2787
read 2812 2827
read 2796 2811
read 2882 3023
read 2786 2789
Read IFD
read 3024 3025
read 3026 3219
read 3282 3423
read 3218 3221
Read IFD
read 3424 3425
read 3426 3619
read 3682 3823
read 3618 3621
Read IFD
read 3824 3825
read 3826 4019
read 4082 4223
read 4018 4021
Read IFD
read 4224 4225
read 4226 4419
read 4482 4623
read 4418 4421
Read IFD
read 4624 4625
read 4626 4795
read 4862 4925
read 4798 4861
read 4794 4797
Read IFD
read 4926 4927
read 4928 5097
read 5116 5131
read 5100 5115
read 5096 5099
Read IFD
read 5132 5133
read 5134 5303
read 5302 5305
Read IFD
read 5306 5307
read 5308 5477
read 5476 5479
Read IFD
read 5480 5481
read 5482 5651
read 5650 5653
Read IFD
read 5654 5655
read 5656 5825
read 5824 5827
Read IFD
read 883331 908790
read 2341520 2341571
```
I don't remember exactly, but it seems that GDAL reads the first 16 KB of the file and then determines everything it needs from that. I think applying the same logic here would be nice.
cc @normanb @sgillies
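As a sketch of that idea (the class and method names below are hypothetical, but `COG_INGESTED_BYTES_AT_OPEN` matches the environment variable used in the patch above): ingest a fixed-size header buffer once, then serve byte ranges from it, extending the buffer only when a request runs past its end.
```python
import os

class BufferedHeader:
    def __init__(self, read):
        # `read(offset, length)` is the reader interface; one up-front request
        # replaces the many tiny reads made while walking the IFDs.
        self.read = read
        size = int(os.environ.get('COG_INGESTED_BYTES_AT_OPEN', '16384'))
        self.buffer = self.read(0, size)

    def get(self, start, length):
        end = start + length
        if end > len(self.buffer):
            # Grow the buffer only when a range falls outside what we have.
            have = len(self.buffer)
            self.buffer += self.read(have, end - have)
        return self.buffer[start:end]
```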
|
0.0
|
eb6cbcfbdbc94ee8fd75450908b375fac93e3989
|
[
"tests/test_filedumper.py::test_tiff_ifds",
"tests/test_filedumper.py::test_be_tiff_ifds",
"tests/test_filedumper.py::test_bigtiff_ifds"
] |
[
"tests/test_filedumper.py::test_tiff_version",
"tests/test_filedumper.py::test_bigtiff_version",
"tests/test_filedumper.py::test_be_tiff_version",
"tests/test_filedumper.py::test_tiff_tile",
"tests/test_filedumper.py::test_tiff_tile_env",
"tests/test_filedumper.py::test_bad_tiff_tile",
"tests/test_filedumper.py::test_bigtiff_tile"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-05-15 19:54:57+00:00
|
mit
| 3,698 |
|
mapbox__cligj-14
|
diff --git a/README.rst b/README.rst
index c532ee3..160c057 100755
--- a/README.rst
+++ b/README.rst
@@ -101,7 +101,7 @@ a delimiter, use the ``--rs`` option
if sequence:
for feature in process_features(features):
if use_rs:
- click.echo(b'\x1e', nl=False)
+ click.echo(u'\x1e', nl=False)
click.echo(json.dumps(feature))
else:
click.echo(json.dumps(
diff --git a/cligj/features.py b/cligj/features.py
index efca280..3b23c16 100644
--- a/cligj/features.py
+++ b/cligj/features.py
@@ -65,7 +65,10 @@ def iter_features(geojsonfile, func=None):
for line in geojsonfile:
if line.startswith(u'\x1e'):
if text_buffer:
- newfeat = func(json.loads(text_buffer))
+ obj = json.loads(text_buffer)
+ if 'coordinates' in obj:
+ obj = to_feature(obj)
+ newfeat = func(obj)
if newfeat:
yield newfeat
text_buffer = line.strip(u'\x1e')
@@ -73,7 +76,10 @@ def iter_features(geojsonfile, func=None):
text_buffer += line
# complete our parsing with a for-else clause.
else:
- newfeat = func(json.loads(text_buffer))
+ obj = json.loads(text_buffer)
+ if 'coordinates' in obj:
+ obj = to_feature(obj)
+ newfeat = func(obj)
if newfeat:
yield newfeat
@@ -97,9 +103,17 @@ def iter_features(geojsonfile, func=None):
newfeat = func(feat)
if newfeat:
yield newfeat
+ elif 'coordinates' in obj:
+ newfeat = func(to_feature(obj))
+ if newfeat:
+ yield newfeat
+ for line in geojsonfile:
+ newfeat = func(to_feature(json.loads(line)))
+ if newfeat:
+ yield newfeat
# Indented or pretty-printed GeoJSON features or feature
- # collections will fail out of the try clause above since
+ # collections will fail out of the try clause above since
# they'll have no complete JSON object on their first line.
# To handle these, we slurp in the entire file and parse its
# text.
@@ -115,6 +129,26 @@ def iter_features(geojsonfile, func=None):
newfeat = func(feat)
if newfeat:
yield newfeat
+ elif 'coordinates' in obj:
+ newfeat = func(to_feature(obj))
+ if newfeat:
+ yield newfeat
+
+
+def to_feature(obj):
+ """Takes a feature or a geometry
+ returns feature verbatim or
+ wraps geom in a feature with empty properties
+ """
+ if obj['type'] == 'Feature':
+ return obj
+ elif 'coordinates' in obj:
+ return {
+ 'type': 'Feature',
+ 'properties': {},
+ 'geometry': obj}
+ else:
+ raise ValueError("Object is not a feature or geometry")
def iter_query(query):
|
mapbox/cligj
|
feb9dd8306092a04c3b93ecfd8c6df6f9e135fdf
|
diff --git a/tests/point_pretty_geom.txt b/tests/point_pretty_geom.txt
new file mode 100644
index 0000000..0b8b33d
--- /dev/null
+++ b/tests/point_pretty_geom.txt
@@ -0,0 +1,4 @@
+{
+ "coordinates": [-122.7282, 45.5801],
+ "type": "Point"
+}
diff --git a/tests/test_features.py b/tests/test_features.py
index d8e240e..43eea90 100644
--- a/tests/test_features.py
+++ b/tests/test_features.py
@@ -4,7 +4,7 @@ import sys
import pytest
from cligj.features import \
- coords_from_query, iter_query, \
+ coords_from_query, iter_query, to_feature, \
normalize_feature_inputs, normalize_feature_objects
@@ -118,6 +118,20 @@ def test_coordpairs_space(expected_features):
assert _geoms(features) == _geoms(expected_features)
+def test_geometrysequence(expected_features):
+ features = normalize_feature_inputs(None, 'features', ["tests/twopoints_geom_seq.txt"])
+ assert _geoms(features) == _geoms(expected_features)
+
+
+def test_geometrysequencers(expected_features):
+ features = normalize_feature_inputs(None, 'features', ["tests/twopoints_geom_seqrs.txt"])
+ assert _geoms(features) == _geoms(expected_features)
+
+
+def test_geometrypretty(expected_features):
+ features = normalize_feature_inputs(None, 'features', ["tests/point_pretty_geom.txt"])
+ assert _geoms(features)[0] == _geoms(expected_features)[0]
+
class MockGeo(object):
def __init__(self, feature):
self.__geo_interface__ = feature
@@ -134,3 +148,10 @@ def test_normalize_feature_objects_bad(expected_features):
objs.append(MockGeo(dict()))
with pytest.raises(ValueError):
list(normalize_feature_objects(objs))
+
+def test_to_feature(expected_features):
+ geom = expected_features[0]['geometry']
+ feat = {'type': 'Feature', 'properties': {}, 'geometry': geom}
+ assert to_feature(feat) == to_feature(geom)
+ with pytest.raises(ValueError):
+ assert to_feature({'type': 'foo'})
diff --git a/tests/twopoints_geom_seq.txt b/tests/twopoints_geom_seq.txt
new file mode 100644
index 0000000..a545e4d
--- /dev/null
+++ b/tests/twopoints_geom_seq.txt
@@ -0,0 +1,2 @@
+{"coordinates": [-122.7282, 45.5801], "type": "Point"}
+{"coordinates": [-121.3153, 44.0582], "type": "Point"}
diff --git a/tests/twopoints_geom_seqrs.txt b/tests/twopoints_geom_seqrs.txt
new file mode 100644
index 0000000..e5f40bd
--- /dev/null
+++ b/tests/twopoints_geom_seqrs.txt
@@ -0,0 +1,8 @@
+{
+ "coordinates": [-122.7282, 45.5801],
+ "type": "Point"
+ }
+{
+ "coordinates": [-121.3153, 44.0582],
+ "type": "Point"
+ }
|
Geometry inputs
Some programs output GeoJSON geometries (not features). See mapshaper, as mentioned in https://github.com/perrygeo/python-rasterstats/issues/81, for one such use case.
**Question**: Do we support a stream of GeoJSON geometries, which would be exposed as an iterable of features? Or should the user rely on an external tool (maybe this is even a job for `jq`)?
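A sketch of one possible answer, mirroring the `to_feature` helper in the patch above: pass features through verbatim and wrap bare geometries in a feature with empty properties.
```python
import json

def to_feature(obj):
    # A feature is returned as-is; a bare geometry gets a minimal wrapper.
    if obj.get('type') == 'Feature':
        return obj
    elif 'coordinates' in obj:
        return {'type': 'Feature', 'properties': {}, 'geometry': obj}
    raise ValueError("Object is not a feature or geometry")

line = '{"coordinates": [-122.7282, 45.5801], "type": "Point"}'
print(to_feature(json.loads(line))['geometry']['type'])  # Point
```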
|
0.0
|
feb9dd8306092a04c3b93ecfd8c6df6f9e135fdf
|
[
"tests/test_features.py::test_iter_query_string",
"tests/test_features.py::test_iter_query_file",
"tests/test_features.py::test_coords_from_query_json",
"tests/test_features.py::test_featurecollection_file",
"tests/test_features.py::test_featurecollection_pretty_file",
"tests/test_features.py::test_featurecollection_stdin",
"tests/test_features.py::test_featuresequence",
"tests/test_features.py::test_featuresequence_stdin",
"tests/test_features.py::test_singlefeature",
"tests/test_features.py::test_singlefeature_stdin",
"tests/test_features.py::test_featuresequencers",
"tests/test_features.py::test_featuresequencers_stdin",
"tests/test_features.py::test_coordarrays",
"tests/test_features.py::test_geometrysequence",
"tests/test_features.py::test_geometrysequencers",
"tests/test_features.py::test_geometrypretty",
"tests/test_features.py::test_normalize_feature_objects",
"tests/test_features.py::test_normalize_feature_objects_bad",
"tests/test_features.py::test_to_feature"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2015-12-23 19:36:03+00:00
|
bsd-3-clause
| 3,699 |
|
mapbox__mapbox-sdk-py-128
|
diff --git a/docs/surface.md b/docs/surface.md
index 8fcbb77..c6e9676 100644
--- a/docs/surface.md
+++ b/docs/surface.md
@@ -86,7 +86,7 @@ contours).
... polyline=True, zoom=12, interpolate=False)
>>> points = response.geojson()
>>> [f['properties']['ele'] for f in points['features']]
-[None, None, None]
+[2190, 2190, 2160]
```
diff --git a/mapbox/encoding.py b/mapbox/encoding.py
index 0674d51..6190ea6 100644
--- a/mapbox/encoding.py
+++ b/mapbox/encoding.py
@@ -68,12 +68,13 @@ def encode_waypoints(features, min_limit=None, max_limit=None, precision=6):
return ';'.join(coords)
-def encode_polyline(features, zoom_level=18):
+def encode_polyline(features):
"""Encode and iterable of features as a polyline
"""
points = list(read_points(features))
+ latlon_points = [(x[1], x[0]) for x in points]
codec = PolylineCodec()
- return codec.encode(points)
+ return codec.encode(latlon_points)
def encode_coordinates_json(features):
|
mapbox/mapbox-sdk-py
|
2c11fdee6eee83ea82398cc0756ac7f35aada801
|
diff --git a/tests/test_encoding.py b/tests/test_encoding.py
index fa15f14..9326ae6 100644
--- a/tests/test_encoding.py
+++ b/tests/test_encoding.py
@@ -113,7 +113,7 @@ def test_unknown_object():
def test_encode_polyline():
- expected = "vdatOwp_~EhupD{xiA"
+ expected = "wp_~EvdatO{xiAhupD"
assert expected == encode_polyline(gj_point_features)
assert expected == encode_polyline(gj_multipoint_features)
assert expected == encode_polyline(gj_line_features)
diff --git a/tests/test_surface.py b/tests/test_surface.py
index 05bd2c7..2ba08cd 100644
--- a/tests/test_surface.py
+++ b/tests/test_surface.py
@@ -55,7 +55,7 @@ def test_surface_geojson():
@responses.activate
def test_surface_params():
- params = "&encoded_polyline=~kbkTss%60%7BEQeAHu%40&zoom=16&interpolate=false"
+ params = "&encoded_polyline=ss%60%7BE~kbkTeAQu%40H&zoom=16&interpolate=false"
responses.add(
responses.GET,
'https://api.mapbox.com/v4/surface/mapbox.mapbox-terrain-v1.json?access_token=pk.test&fields=ele&layer=contour&geojson=true' + params,
|
Encoded polylines in wrong coordinate order
Currently, we take the GeoJSON point array and encode the points directly in [lon, lat] order. Polylines should be [lat, lon].
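A small illustration of the fix, assuming the `polyline` package's `PolylineCodec` class used by the patch above: swap each GeoJSON (lon, lat) pair into (lat, lon) before encoding.
```python
from polyline.codec import PolylineCodec

points = [(-122.7282, 45.5801), (-121.3153, 44.0582)]  # GeoJSON order: (lon, lat)
latlon_points = [(lat, lon) for lon, lat in points]    # polyline order: (lat, lon)
print(PolylineCodec().encode(latlon_points))
```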
|
0.0
|
2c11fdee6eee83ea82398cc0756ac7f35aada801
|
[
"tests/test_encoding.py::test_encode_polyline",
"tests/test_surface.py::test_surface_params"
] |
[
"tests/test_encoding.py::test_read_geojson_features",
"tests/test_encoding.py::test_geo_interface",
"tests/test_encoding.py::test_encode_waypoints",
"tests/test_encoding.py::test_encode_limits",
"tests/test_encoding.py::test_unsupported_geometry",
"tests/test_encoding.py::test_unknown_object",
"tests/test_encoding.py::test_encode_coordinates_json",
"tests/test_surface.py::test_surface",
"tests/test_surface.py::test_surface_geojson"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-06-09 18:19:51+00:00
|
mit
| 3,700 |
|
mapbox__mapbox-sdk-py-136
|
diff --git a/mapbox/polyline/README.rst b/mapbox/polyline/README.rst
deleted file mode 100644
index 3e0b913..0000000
--- a/mapbox/polyline/README.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-polyline
-========
-
-``polyline`` is a Python implementation of Google's Encoded Polyline Algorithm
-Format (http://goo.gl/PvXf8Y). It is essentially a port of
-https://github.com/mapbox/polyline built with Python 2 and 3 support in mind.
-
-The documentation for ``polyline`` can be found at http://polyline.rtfd.org/.
-
-License
--------
-
-MIT © `Bruno M. Custódio <mailto:[email protected]>`_
diff --git a/mapbox/services/static.py b/mapbox/services/static.py
index 1b51897..5f81a88 100644
--- a/mapbox/services/static.py
+++ b/mapbox/services/static.py
@@ -4,6 +4,7 @@ from uritemplate import URITemplate
from mapbox import errors
from mapbox.services.base import Service
+from mapbox.utils import normalize_geojson_featurecollection
class Static(Service):
@@ -59,10 +60,9 @@ class Static(Service):
fmt=image_format)
if features:
- values['overlay'] = json.dumps({'type': 'FeatureCollection',
- 'features': features},
- separators=(',', ':'),
- sort_keys=sort_keys)
+ collection = normalize_geojson_featurecollection(features)
+ values['overlay'] = json.dumps(
+ collection, separators=(',', ':'), sort_keys=sort_keys)
self._validate_overlay(values['overlay'])
@@ -78,7 +78,7 @@ class Static(Service):
# No overlay
pth = '/{mapid}/{lon},{lat},{z}/{width}x{height}.{fmt}'
-
+
uri = URITemplate(self.baseuri + pth).expand(**values)
res = self.session.get(uri)
self.handle_http_error(res)
diff --git a/mapbox/utils.py b/mapbox/utils.py
new file mode 100644
index 0000000..b811212
--- /dev/null
+++ b/mapbox/utils.py
@@ -0,0 +1,30 @@
+from collections import Mapping, Sequence
+
+def normalize_geojson_featurecollection(obj):
+ """Takes a geojson-like mapping representing
+ geometry, Feature or FeatureCollection (or a sequence of such objects)
+ and returns a FeatureCollection-like dict
+ """
+ if not isinstance(obj, Sequence):
+ obj = [obj]
+
+ features = []
+ for x in obj:
+ if not isinstance(x, Mapping) or 'type' not in x:
+ raise ValueError(
+ "Expecting a geojson-like mapping or sequence of them")
+
+ if 'features' in x:
+ features.extend(x['features'])
+ elif 'geometry' in x:
+ features.append(x)
+ elif 'coordinates' in x:
+ feat = {'type': 'Feature',
+ 'properties': {},
+ 'geometry': x}
+ features.append(feat)
+ else:
+ raise ValueError(
+ "Expecting a geojson-like mapping or sequence of them")
+
+ return {'type': 'FeatureCollection', 'features': features}
|
mapbox/mapbox-sdk-py
|
532ef39d0ecb64144e09cfbcf2d26b57d5fbcf9c
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000..35edc5e
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,66 @@
+import pytest
+
+from mapbox.utils import normalize_geojson_featurecollection
+
+
+geom = {'type': 'Point', 'coordinates': (-122, 45)}
+feat = {'type': 'Feature', 'geometry': geom, 'properties': {}}
+coll = {'type': 'FeatureCollection', 'features': [feat]}
+coll2 = {'type': 'FeatureCollection', 'features': [feat, feat]}
+
+
+def test_geom():
+ res = normalize_geojson_featurecollection(geom)
+ assert res['type'] == 'FeatureCollection'
+ assert res == coll
+
+
+def test_feat():
+ res = normalize_geojson_featurecollection(feat)
+ assert res['type'] == 'FeatureCollection'
+ assert res == coll
+
+
+def test_coll():
+ res = normalize_geojson_featurecollection(coll)
+ assert res['type'] == 'FeatureCollection'
+ assert res == coll
+
+
+def test_mult_geom():
+ geoms = (geom, geom)
+ res = normalize_geojson_featurecollection(geoms)
+ assert res['type'] == 'FeatureCollection'
+ assert res == coll2
+
+
+def test_mult_feat():
+ feats = (feat, feat)
+ res = normalize_geojson_featurecollection(feats)
+ assert res['type'] == 'FeatureCollection'
+ assert res == coll2
+
+
+def test_mult_coll():
+ colls = (coll, coll)
+ res = normalize_geojson_featurecollection(colls)
+ assert res['type'] == 'FeatureCollection'
+ assert res == coll2
+
+
+def test_mix():
+ objs = (geom, feat, coll, coll2)
+ res = normalize_geojson_featurecollection(objs)
+ assert res['type'] == 'FeatureCollection'
+ assert len(res['features']) == 5
+
+
+def test_nonsense():
+ with pytest.raises(ValueError):
+ normalize_geojson_featurecollection(123)
+
+ with pytest.raises(ValueError):
+ normalize_geojson_featurecollection({'foo': 'bar'})
+
+ with pytest.raises(ValueError):
+ normalize_geojson_featurecollection({'type': 'not-geojson'})
|
Accept bare geometries as static map overlays
Where "bare" means not in the context of a Feature. See #132 for background.
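A usage sketch of the `normalize_geojson_featurecollection` helper added in the patch above: any geometry, Feature, FeatureCollection, or sequence of them comes back as a single FeatureCollection.
```python
from mapbox.utils import normalize_geojson_featurecollection

geom = {'type': 'Point', 'coordinates': (-122, 45)}
coll = normalize_geojson_featurecollection(geom)

# The bare geometry is wrapped in a Feature with empty properties.
assert coll['type'] == 'FeatureCollection'
assert coll['features'][0]['geometry'] == geom

# Mixed sequences are flattened into one collection.
merged = normalize_geojson_featurecollection((geom, coll))
assert len(merged['features']) == 2
```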
|
0.0
|
532ef39d0ecb64144e09cfbcf2d26b57d5fbcf9c
|
[
"tests/test_utils.py::test_geom",
"tests/test_utils.py::test_feat",
"tests/test_utils.py::test_coll",
"tests/test_utils.py::test_mult_geom",
"tests/test_utils.py::test_mult_feat",
"tests/test_utils.py::test_mult_coll",
"tests/test_utils.py::test_mix",
"tests/test_utils.py::test_nonsense"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_added_files",
"has_removed_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-10-14 17:42:16+00:00
|
mit
| 3,701 |
|
mapbox__mapbox-sdk-py-261
|
diff --git a/CHANGES b/CHANGES
index 6133da9..537d555 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,13 @@
Changes
=======
+0.17.1 (2018-09-26)
+-------------------
+
+Bug fixes:
+
+- Fix for the encoding of Tilequery coordinates (#259).
+
0.17.0 (TBD)
------------
diff --git a/mapbox/__init__.py b/mapbox/__init__.py
index ddd59cd..7e974c4 100644
--- a/mapbox/__init__.py
+++ b/mapbox/__init__.py
@@ -1,5 +1,5 @@
# mapbox
-__version__ = "0.17.0"
+__version__ = "0.17.1"
from .services.datasets import Datasets
from .services.directions import Directions
diff --git a/mapbox/services/tilequery.py b/mapbox/services/tilequery.py
index b0380de..8d3a6c9 100644
--- a/mapbox/services/tilequery.py
+++ b/mapbox/services/tilequery.py
@@ -163,12 +163,12 @@ class Tilequery(Service):
# Create dict to assist in building URI resource path.
path_values = dict(
- map_id=map_id, api_name=self.api_name, coordinates="{},{}".format(lon, lat)
+ api_name=self.api_name, lon=lon, lat=lat
)
# Build URI resource path.
- path_part = "/{map_id}/{api_name}/{coordinates}.json"
+ path_part = "/" + map_id + "/{api_name}/{lon},{lat}.json"
uri = URITemplate(self.base_uri + path_part).expand(**path_values)
# Build URI query_parameters.
|
mapbox/mapbox-sdk-py
|
15989e7c61d23ba16745a2394a87d46b6f7675de
|
diff --git a/tests/test_tilequery.py b/tests/test_tilequery.py
index e6074e0..9c53da8 100644
--- a/tests/test_tilequery.py
+++ b/tests/test_tilequery.py
@@ -97,7 +97,7 @@ def test_tilequery_one_mapid():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test",
match_querystring=True,
body='{"key": "value"}',
@@ -117,9 +117,9 @@ def test_tilequery_two_mapids():
method=GET,
url="https://api.mapbox.com"
+ "/v4"
- + "/mapbox.mapbox-streets-v9%2Cmapbox.mapbox-streets-v10"
+ + "/mapbox.mapbox-streets-v9,mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test",
match_querystring=True,
body='{"key": "value"}',
@@ -143,7 +143,7 @@ def test_tilequery_with_radius():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25",
match_querystring=True,
@@ -168,7 +168,7 @@ def test_tilequery_with_limit():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&limit=25",
match_querystring=True,
@@ -193,7 +193,7 @@ def test_tilequery_with_dedupe():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&dedupe=true",
match_querystring=True,
@@ -218,7 +218,7 @@ def test_tilequery_with_geometry():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&geometry=linestring",
match_querystring=True,
@@ -243,9 +243,9 @@ def test_tilequery_with_layers():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
- + "&layers=layer0%2Clayer1%2Clayer2",
+ + "&layers=layer0,layer1%2Clayer2",
match_querystring=True,
body='{"key": "value"}',
status=200,
@@ -271,7 +271,7 @@ def test_tilequery_with_radius_and_limit():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&limit=25",
@@ -297,7 +297,7 @@ def test_tilequery_with_radius_and_dedupe():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&dedupe=true",
@@ -323,7 +323,7 @@ def test_tilequery_with_radius_and_geometry():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&geometry=linestring",
@@ -349,10 +349,10 @@ def test_tilequery_with_radius_and_layers():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
- + "&layers=layer0%2Clayer1%2Clayer2",
+ + "&layers=layer0,layer1%2Clayer2",
match_querystring=True,
body='{"key": "value"}',
status=200,
@@ -379,7 +379,7 @@ def test_tilequery_with_radius_limit_and_dedupe():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&limit=25"
@@ -406,7 +406,7 @@ def test_tilequery_with_radius_limit_and_geometry():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&limit=25"
@@ -438,11 +438,11 @@ def test_tilequery_with_radius_limit_and_layers():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&limit=25"
- + "&layers=layer0%2Clayer1%2Clayer2",
+ + "&layers=layer0,layer1%2Clayer2",
match_querystring=True,
body='{"key": "value"}',
status=200,
@@ -470,7 +470,7 @@ def test_tilequery_with_radius_limit_dedupe_and_geometry():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&limit=25"
@@ -504,12 +504,12 @@ def test_tilequery_with_radius_limit_dedupe_and_layers():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&limit=25"
+ "&dedupe=true"
- + "&layers=layer0%2Clayer1%2Clayer2",
+ + "&layers=layer0,layer1%2Clayer2",
match_querystring=True,
body='{"key": "value"}',
status=200,
@@ -538,13 +538,13 @@ def test_tilequery_with_radius_limit_dedupe_geometry_and_layers():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test"
+ "&radius=25"
+ "&limit=25"
+ "&dedupe=true"
+ "&geometry=linestring"
- + "&layers=layer0%2Clayer1%2Clayer2",
+ + "&layers=layer0,layer1%2Clayer2",
match_querystring=True,
body='{"key": "value"}',
status=200,
@@ -574,7 +574,7 @@ def test_tilequery_geojson_method():
+ "/v4"
+ "/mapbox.mapbox-streets-v10"
+ "/tilequery"
- + "/0.0%2C1.1.json"
+ + "/0.0,1.1.json"
+ "?access_token=pk.test",
match_querystring=True,
body='{"key": "value"}',
|
Tilequery coordinate encoding is wrong
Version 0.17.0 puts `-105.0%2C40.0` in the request URL, but the Tilequery API requires `-105.0,40.0`.
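A sketch of the root cause, assuming `uritemplate`'s `URITemplate` (which the client uses): simple template expansion percent-encodes reserved characters such as the comma, so the fix moves the comma into the literal part of the template.
```python
from uritemplate import URITemplate

# A comma inside an expanded value gets percent-encoded.
bad = URITemplate('/tilequery/{coordinates}.json').expand(coordinates='-105.0,40.0')
print(bad)   # /tilequery/-105.0%2C40.0.json

# A comma written as a template literal stays a comma.
good = URITemplate('/tilequery/{lon},{lat}.json').expand(lon='-105.0', lat='40.0')
print(good)  # /tilequery/-105.0,40.0.json
```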
|
0.0
|
15989e7c61d23ba16745a2394a87d46b6f7675de
|
[
"tests/test_tilequery.py::test_tilequery_one_mapid",
"tests/test_tilequery.py::test_tilequery_two_mapids",
"tests/test_tilequery.py::test_tilequery_with_radius",
"tests/test_tilequery.py::test_tilequery_with_limit",
"tests/test_tilequery.py::test_tilequery_with_dedupe",
"tests/test_tilequery.py::test_tilequery_with_geometry",
"tests/test_tilequery.py::test_tilequery_with_layers",
"tests/test_tilequery.py::test_tilequery_with_radius_and_limit",
"tests/test_tilequery.py::test_tilequery_with_radius_and_dedupe",
"tests/test_tilequery.py::test_tilequery_with_radius_and_geometry",
"tests/test_tilequery.py::test_tilequery_with_radius_and_layers",
"tests/test_tilequery.py::test_tilequery_with_radius_limit_and_dedupe",
"tests/test_tilequery.py::test_tilequery_with_radius_limit_and_geometry",
"tests/test_tilequery.py::test_tilequery_with_radius_limit_and_layers",
"tests/test_tilequery.py::test_tilequery_with_radius_limit_dedupe_and_geometry",
"tests/test_tilequery.py::test_tilequery_with_radius_limit_dedupe_and_layers",
"tests/test_tilequery.py::test_tilequery_with_radius_limit_dedupe_geometry_and_layers",
"tests/test_tilequery.py::test_tilequery_geojson_method"
] |
[
"tests/test_tilequery.py::test_object_properties",
"tests/test_tilequery.py::test_validate_lon_invalid[-181]",
"tests/test_tilequery.py::test_validate_lon_invalid[181]",
"tests/test_tilequery.py::test_validate_lon_valid[-180]",
"tests/test_tilequery.py::test_validate_lon_valid[0]",
"tests/test_tilequery.py::test_validate_lon_valid[180]",
"tests/test_tilequery.py::test_validate_lat_invalid[-86]",
"tests/test_tilequery.py::test_validate_lat_invalid[86]",
"tests/test_tilequery.py::test_validate_lat_valid[-85.0511]",
"tests/test_tilequery.py::test_validate_lat_valid[0]",
"tests/test_tilequery.py::test_validate_lat_valid[85.0511]",
"tests/test_tilequery.py::test_validate_radius_invalid",
"tests/test_tilequery.py::test_validate_radius_valid[0]",
"tests/test_tilequery.py::test_validate_radius_valid[1000000]",
"tests/test_tilequery.py::test_validate_limit_invalid[0]",
"tests/test_tilequery.py::test_validate_limit_invalid[51]",
"tests/test_tilequery.py::test_validate_limit_valid[1]",
"tests/test_tilequery.py::test_validate_limit_valid[25]",
"tests/test_tilequery.py::test_validate_limit_valid[50]",
"tests/test_tilequery.py::test_validate_geometry_invalid",
"tests/test_tilequery.py::test_validate_radius_geometry[linestring]",
"tests/test_tilequery.py::test_validate_radius_geometry[point]",
"tests/test_tilequery.py::test_validate_radius_geometry[polygon]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-09-26 22:33:13+00:00
|
mit
| 3,702 |
|
mapbox__rio-color-14
|
diff --git a/rio_color/scripts/cli.py b/rio_color/scripts/cli.py
index 7ec9cab..9580f68 100755
--- a/rio_color/scripts/cli.py
+++ b/rio_color/scripts/cli.py
@@ -1,13 +1,27 @@
import click
+
import rasterio
from rio_color.workers import atmos_worker, color_worker
from rio_color.operations import parse_operations
import riomucho
+jobs_opt = click.option(
+ '--jobs', '-j', type=int, default=1,
+ help="Number of jobs to run simultaneously, Use -1 for all cores, default: 1")
+
+
+def check_jobs(jobs):
+ if jobs == 0:
+ raise click.UsageError("Jobs must be >= 1 or == -1")
+ elif jobs < 0:
+ import multiprocessing
+ jobs = multiprocessing.cpu_count()
+ return jobs
+
+
@click.command('color')
[email protected]('--jobs', '-j', type=int, default=1,
- help="Number of jobs to run simultaneously, default: 1")
+@jobs_opt
@click.option('--out-dtype', '-d', type=click.Choice(['uint8', 'uint16']),
help="Integer data type for output data, default: same as input")
@click.argument('src_path', type=click.Path(exists=True))
@@ -73,6 +87,8 @@ Example:
'out_dtype': out_dtype
}
+ jobs = check_jobs(jobs)
+
if jobs > 1:
with riomucho.RioMucho(
[src_path],
@@ -103,8 +119,7 @@ Example:
@click.option('--bias', '-b', type=click.FLOAT, default=15,
help="Skew (brighten/darken) the output. Lower values make it "
"brighter. 0..100 (50 is none), default: 15.")
[email protected]('--jobs', '-j', type=int, default=1,
- help="Number of jobs to run simultaneously, default: 1")
+@jobs_opt
@click.option('--out-dtype', '-d', type=click.Choice(['uint8', 'uint16']),
help="Integer data type for output data, default: same as input")
@click.argument('src_path', type=click.Path(exists=True))
@@ -132,6 +147,8 @@ def atmos(ctx, atmo, contrast, bias, jobs, out_dtype,
'out_dtype': out_dtype
}
+ jobs = check_jobs(jobs)
+
if jobs > 1:
with riomucho.RioMucho(
[src_path],
|
mapbox/rio-color
|
bb410109f6a0ae376443880f96dc8981766066a3
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 8448a74..1d30644 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,10 +1,12 @@
import os
+from click import UsageError
from click.testing import CliRunner
import numpy as np
+import pytest
import rasterio
-from rio_color.scripts.cli import color, atmos
+from rio_color.scripts.cli import color, atmos, check_jobs
def equal(r1, r2):
@@ -98,3 +100,25 @@ def test_bad_op(tmpdir):
assert result.exit_code == 2
assert "foob is not a valid operation" in result.output
assert not os.path.exists(output)
+
+
+def test_color_jobsn1(tmpdir):
+ output = str(tmpdir.join('colorj1.tif'))
+ runner = CliRunner()
+ result = runner.invoke(
+ color,
+ [
+ '-d', 'uint8',
+ '-j', '-1',
+ 'tests/rgb8.tif',
+ output,
+ "gamma 1,2,3 1.85"])
+ assert result.exit_code == 0
+ assert os.path.exists(output)
+
+
+def test_check_jobs():
+ assert 1 == check_jobs(1)
+ assert check_jobs(-1) > 0
+ with pytest.raises(UsageError):
+ check_jobs(0)
|
use all cores with -j -1
Modify max_procs so that a negative value means "use the number of cores available on this machine"
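In isolation, the approach taken in the patch above looks like this (using a plain `ValueError` here so the sketch has no click dependency):
```python
import multiprocessing

def check_jobs(jobs):
    # 0 is invalid; any negative value maps to "all available cores".
    if jobs == 0:
        raise ValueError("Jobs must be >= 1 or == -1")
    elif jobs < 0:
        jobs = multiprocessing.cpu_count()
    return jobs

assert check_jobs(1) == 1
assert check_jobs(-1) == multiprocessing.cpu_count()
```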
|
0.0
|
bb410109f6a0ae376443880f96dc8981766066a3
|
[
"tests/test_cli.py::test_check_jobs"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-04-18 20:15:46+00:00
|
mit
| 3,703 |
|
mapbox__rio-mbtiles-67
|
diff --git a/CHANGES.txt b/CHANGES.txt
index a2a5269..b479090 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,6 +1,12 @@
Changes
=======
+1.5b3 (2020-10-28)
+------------------
+
+- Add a --covers option, taking a quadkey, which limits the output to tiles
+ that cover the quadkey's tile (#66).
+
1.5b2 (2020-10-16)
------------------
diff --git a/mbtiles/__init__.py b/mbtiles/__init__.py
index b918758..c8d075a 100644
--- a/mbtiles/__init__.py
+++ b/mbtiles/__init__.py
@@ -3,7 +3,7 @@
import sys
import warnings
-__version__ = "1.5b2"
+__version__ = "1.5b3"
if sys.version_info < (3, 7):
warnings.warn(
diff --git a/mbtiles/scripts/cli.py b/mbtiles/scripts/cli.py
index 17e2821..884f436 100644
--- a/mbtiles/scripts/cli.py
+++ b/mbtiles/scripts/cli.py
@@ -204,6 +204,7 @@ def extract_features(ctx, param, value):
@click.option(
"--progress-bar", "-#", default=False, is_flag=True, help="Display progress bar."
)
[email protected]("--covers", help="Restrict mbtiles output to cover a quadkey")
@click.option(
"--cutline",
type=click.Path(exists=True),
@@ -248,6 +249,7 @@ def mbtiles(
rgba,
implementation,
progress_bar,
+ covers,
cutline,
open_options,
warp_options,
@@ -351,6 +353,10 @@ def mbtiles(
)
warp_options["cutline"] = shapely.wkt.dumps(cutline_rev)
+ if covers is not None:
+ covers_tile = mercantile.quadkey_to_tile(covers)
+ west, south, east, north = mercantile.bounds(covers_tile)
+
# Resolve the minimum and maximum zoom levels for export.
if zoom_levels:
minzoom, maxzoom = map(int, zoom_levels.split(".."))
|
mapbox/rio-mbtiles
|
127233e2999c41bc7fdd3c11b6b7ff2800479322
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 5808479..c17e0b4 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -483,3 +483,28 @@ def test_invalid_cutline(tmpdir, data, rgba_points_path, impl, filename):
],
)
assert result.exit_code == 1
+
+
[email protected](("source", "quadkey", "zooms", "exp_num_results"), [("RGB.byte.tif", "0320", "4..4", 1), ("RGB.byte.tif", "032022", "6..6", 0)])
+def test_covers(tmpdir, data, source, quadkey, zooms, exp_num_results):
+ inputfile = str(data.join(source))
+ outputfile = str(tmpdir.join("export.mbtiles"))
+ runner = CliRunner()
+ result = runner.invoke(
+ main_group,
+ [
+ "mbtiles",
+ "--zoom-levels",
+ zooms,
+ "--covers",
+ quadkey,
+ inputfile,
+ outputfile,
+ ],
+ )
+ assert result.exit_code == 0
+ conn = sqlite3.connect(outputfile)
+ cur = conn.cursor()
+ cur.execute("select * from tiles")
+ results = cur.fetchall()
+ assert len(results) == exp_num_results
|
Add --covers option to define output extent
Would work like `--cutline` but without masking. Possible values:
* ~A GeoJSON FeatureCollection~
* A single web mercator quadkey string (see the sketch after this list)
* A single JSON-encoded `[x, y, z]` web mercator tile object
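For the quadkey case, the geographic window comes straight from mercantile, as in the patch above:
```python
import mercantile

tile = mercantile.quadkey_to_tile("0320")           # Tile(x=4, y=6, z=4)
west, south, east, north = mercantile.bounds(tile)
print(west, south, east, north)                     # approx. -90.0 21.94 -67.5 40.98
```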
|
0.0
|
127233e2999c41bc7fdd3c11b6b7ff2800479322
|
[
"tests/test_cli.py::test_covers[RGB.byte.tif-0320-4..4-1]",
"tests/test_cli.py::test_covers[RGB.byte.tif-032022-6..6-0]"
] |
[
"tests/test_cli.py::test_cli_help",
"tests/test_cli.py::test_dst_nodata_validation",
"tests/test_cli.py::test_export_metadata[RGB.byte.tif]",
"tests/test_cli.py::test_export_metadata[RGBA.byte.tif]",
"tests/test_cli.py::test_export_overwrite",
"tests/test_cli.py::test_export_metadata_output_opt",
"tests/test_cli.py::test_export_tiles",
"tests/test_cli.py::test_export_zoom",
"tests/test_cli.py::test_export_jobs",
"tests/test_cli.py::test_export_src_nodata",
"tests/test_cli.py::test_export_dump",
"tests/test_cli.py::test_export_tile_size[256]",
"tests/test_cli.py::test_export_tile_size[512]",
"tests/test_cli.py::test_export_bilinear",
"tests/test_cli.py::test_skip_empty",
"tests/test_cli.py::test_invalid_format_rgba",
"tests/test_cli.py::test_rgba_png[RGBA.byte.tif]",
"tests/test_cli.py::test_export_count[cf-4-10-70-RGB.byte.tif]",
"tests/test_cli.py::test_export_count[cf-6-7-6-RGB.byte.tif]",
"tests/test_cli.py::test_export_count[cf-4-10-12-rgb-193f513.vrt]",
"tests/test_cli.py::test_export_count[mp-4-10-70-RGB.byte.tif]",
"tests/test_cli.py::test_export_count[mp-6-7-6-RGB.byte.tif]",
"tests/test_cli.py::test_export_count[mp-4-10-12-rgb-193f513.vrt]",
"tests/test_cli.py::test_progress_bar[cf-RGBA.byte.tif]",
"tests/test_cli.py::test_progress_bar[mp-RGBA.byte.tif]",
"tests/test_cli.py::test_appending_export_count[cf-4-10-70-sources0]",
"tests/test_cli.py::test_appending_export_count[mp-4-10-70-sources0]",
"tests/test_cli.py::test_mutually_exclusive_operations",
"tests/test_cli.py::test_input_required[inputfiles0]",
"tests/test_cli.py::test_input_required[inputfiles1]",
"tests/test_cli.py::test_append_or_overwrite_required",
"tests/test_cli.py::test_cutline_progress_bar[cf-RGBA.byte.tif]",
"tests/test_cli.py::test_cutline_progress_bar[mp-RGBA.byte.tif]",
"tests/test_cli.py::test_invalid_cutline[cf-RGBA.byte.tif]",
"tests/test_cli.py::test_invalid_cutline[mp-RGBA.byte.tif]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-28 02:53:07+00:00
|
mit
| 3,704 |
|
mapbox__rio-mbtiles-75
|
diff --git a/CHANGES.txt b/CHANGES.txt
index 151e1d8..057614e 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,6 +1,13 @@
Changes
=======
+1.5.1 (2021-02-02)
+------------------
+
+- Add --co (creation) options for fine control over quality of tiles using any
+ of a format's valid GDAL creation options (#73).
+- Add support for WebP tiles (#72).
+
1.5.0 (2020-10-30)
------------------
diff --git a/README.rst b/README.rst
index 2020e36..16c7395 100644
--- a/README.rst
+++ b/README.rst
@@ -6,8 +6,8 @@ rio-mbtiles
A plugin for the
`Rasterio CLI <https://github.com/mapbox/rasterio/blob/master/docs/cli.rst>`__
-that exports a raster dataset to the MBTiles (version 1.1) format. Features
-include automatic reprojection and parallel processing.
+that exports a raster dataset to the MBTiles (version 1.3) format. Features
+include automatic reprojection and concurrent tile generation.
Usage
-----
@@ -17,15 +17,21 @@ Usage
$ rio mbtiles --help
Usage: rio mbtiles [OPTIONS] INPUT [OUTPUT]
- Export a dataset to MBTiles (version 1.1) in a SQLite file.
+ Export a dataset to MBTiles (version 1.3) in a SQLite file.
The input dataset may have any coordinate reference system. It must have
at least three bands, which will be become the red, blue, and green bands
of the output image tiles.
An optional fourth alpha band may be copied to the output tiles by using
- the --rgba option in combination with the PNG format. This option requires
- that the input dataset has at least 4 bands.
+ the --rgba option in combination with the PNG or WEBP formats. This option
+ requires that the input dataset has at least 4 bands.
+
+ The default quality for JPEG and WEBP output (possible range: 10-100) is
+ 75. This value can be changed with the use of the QUALITY creation option,
+ e.g. `--co QUALITY=90`. The default zlib compression level for PNG output
+ (possible range: 1-9) is 6. This value can be changed like `--co
+ ZLEVEL=8`. Lossless WEBP can be chosen with `--co LOSSLESS=TRUE`.
If no zoom levels are specified, the defaults are the zoom levels nearest
to the one at which one tile may contain the entire source dataset.
@@ -48,7 +54,7 @@ Usage
--description TEXT MBTiles dataset description.
--overlay Export as an overlay (the default).
--baselayer Export as a base layer.
- -f, --format [JPEG|PNG] Tile image format.
+ -f, --format [JPEG|PNG|WEBP] Tile image format.
--tile-size INTEGER Width and height of individual square tiles
to create. [default: 256]
@@ -69,7 +75,7 @@ Usage
nearest]
--version Show the version and exit.
- --rgba Select RGBA output. For PNG only.
+ --rgba Select RGBA output. For PNG or WEBP only.
--implementation [cf|mp] Concurrency implementation. Use
concurrent.futures (cf) or multiprocessing
(mp).
@@ -85,6 +91,10 @@ Usage
GDAL format driver documentation for more
information.
+ --co, --profile NAME=VALUE Driver specific creation options. See the
+ documentation for the selected output driver
+ for more information.
+
--wo NAME=VALUE See the GDAL warp options documentation for
more information.
diff --git a/mbtiles/__init__.py b/mbtiles/__init__.py
index 88ac678..a8fa51f 100644
--- a/mbtiles/__init__.py
+++ b/mbtiles/__init__.py
@@ -3,7 +3,7 @@
import sys
import warnings
-__version__ = "1.5.0"
+__version__ = "1.5.1"
if sys.version_info < (3, 7):
warnings.warn(
diff --git a/mbtiles/cf.py b/mbtiles/cf.py
index 9a2d306..89e2ff4 100644
--- a/mbtiles/cf.py
+++ b/mbtiles/cf.py
@@ -25,13 +25,14 @@ def process_tiles(
progress_bar=None,
open_options=None,
warp_options=None,
+ creation_options=None,
):
"""Warp imagery into tiles and commit to mbtiles database.
"""
with concurrent.futures.ProcessPoolExecutor(
max_workers=num_workers,
initializer=init_worker,
- initargs=(inputfile, base_kwds, resampling, open_options, warp_options),
+ initargs=(inputfile, base_kwds, resampling, open_options, warp_options, creation_options),
) as executor:
group = islice(tiles, BATCH_SIZE)
futures = {executor.submit(process_tile, tile) for tile in group}
diff --git a/mbtiles/mp.py b/mbtiles/mp.py
index 4c3c48d..8c0de7d 100644
--- a/mbtiles/mp.py
+++ b/mbtiles/mp.py
@@ -29,13 +29,14 @@ def process_tiles(
progress_bar=None,
open_options=None,
warp_options=None,
+ creation_options=None,
):
"""Warp raster into tiles and commit tiles to mbtiles database.
"""
pool = Pool(
num_workers,
init_worker,
- (inputfile, base_kwds, resampling, open_options, warp_options),
+ (inputfile, base_kwds, resampling, open_options, warp_options, creation_options),
100 * BATCH_SIZE,
)
diff --git a/mbtiles/scripts/cli.py b/mbtiles/scripts/cli.py
index ba579bb..06b4a97 100644
--- a/mbtiles/scripts/cli.py
+++ b/mbtiles/scripts/cli.py
@@ -13,7 +13,7 @@ import mercantile
import rasterio
from rasterio.enums import Resampling
from rasterio.errors import FileOverwriteError
-from rasterio.rio.options import output_opt, _cb_key_val
+from rasterio.rio.options import creation_options, output_opt, _cb_key_val
from rasterio.warp import transform, transform_geom
import shapely.affinity
from shapely.geometry import mapping, shape
@@ -180,7 +180,7 @@ def extract_features(ctx, param, value):
)
@click.version_option(version=mbtiles_version, message="%(version)s")
@click.option(
- "--rgba", default=False, is_flag=True, help="Select RGBA output. For PNG only."
+ "--rgba", default=False, is_flag=True, help="Select RGBA output. For PNG or WEBP only."
)
@click.option(
"--implementation",
@@ -208,6 +208,7 @@ def extract_features(ctx, param, value):
callback=_cb_key_val,
help="Format driver-specific options to be used when accessing the input dataset. See the GDAL format driver documentation for more information.",
)
+@creation_options
@click.option(
"--wo",
"warp_options",
@@ -239,17 +240,25 @@ def mbtiles(
covers,
cutline,
open_options,
+ creation_options,
warp_options,
):
- """Export a dataset to MBTiles (version 1.1) in a SQLite file.
+ """Export a dataset to MBTiles (version 1.3) in a SQLite file.
The input dataset may have any coordinate reference system. It must
have at least three bands, which will be become the red, blue, and
green bands of the output image tiles.
An optional fourth alpha band may be copied to the output tiles by
- using the --rgba option in combination with the PNG format. This
- option requires that the input dataset has at least 4 bands.
+ using the --rgba option in combination with the PNG or WEBP formats.
+ This option requires that the input dataset has at least 4 bands.
+
+ The default quality for JPEG and WEBP output (possible range:
+ 10-100) is 75. This value can be changed with the use of the QUALITY
+ creation option, e.g. `--co QUALITY=90`. The default zlib
+ compression level for PNG output (possible range: 1-9) is 6. This
+ value can be changed like `--co ZLEVEL=8`. Lossless WEBP can be
+ chosen with `--co LOSSLESS=TRUE`.
If no zoom levels are specified, the defaults are the zoom levels
nearest to the one at which one tile may contain the entire source
@@ -576,6 +585,7 @@ def mbtiles(
image_dump=image_dump,
progress_bar=pbar,
open_options=open_options,
+ creation_options=creation_options,
warp_options=warp_options,
)
diff --git a/mbtiles/worker.py b/mbtiles/worker.py
index 9a6568d..c0099ca 100644
--- a/mbtiles/worker.py
+++ b/mbtiles/worker.py
@@ -17,13 +17,14 @@ TILES_CRS = "EPSG:3857"
log = logging.getLogger(__name__)
-def init_worker(path, profile, resampling_method, open_opts, warp_opts):
- global base_kwds, filename, resampling, open_options, warp_options
+def init_worker(path, profile, resampling_method, open_opts=None, warp_opts=None, creation_opts=None):
+ global base_kwds, filename, resampling, open_options, warp_options, creation_options
resampling = Resampling[resampling_method]
base_kwds = profile.copy()
filename = path
open_options = open_opts.copy() if open_opts is not None else {}
warp_options = warp_opts.copy() if warp_opts is not None else {}
+ creation_options = creation_opts.copy() if creation_opts is not None else {}
def process_tile(tile):
@@ -44,7 +45,7 @@ def process_tile(tile):
Image bytes corresponding to the tile.
"""
- global base_kwds, resampling, filename, open_options, warp_options
+ global base_kwds, resampling, filename, open_options, warp_options, creation_options
with rasterio.open(filename, **open_options) as src:
@@ -53,6 +54,7 @@ def process_tile(tile):
lrx, lry = mercantile.xy(*mercantile.ul(tile.x + 1, tile.y + 1, tile.z))
kwds = base_kwds.copy()
+ kwds.update(**creation_options)
kwds["transform"] = transform_from_bounds(
ulx, lry, lrx, uly, kwds["width"], kwds["height"]
)
|
mapbox/rio-mbtiles
|
fad1e19f35bfdbb34f38563a6c2f638ce5c225ca
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 1d50ad3..02a4c2f 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -18,7 +18,7 @@ def test_cli_help():
runner = CliRunner()
result = runner.invoke(main_group, ["mbtiles", "--help"])
assert result.exit_code == 0
- assert "Export a dataset to MBTiles (version 1.1)" in result.output
+ assert "Export a dataset to MBTiles (version 1.3)" in result.output
@pytest.mark.skipif("sys.version_info >= (3, 7)", reason="Test requires Python < 3.7")
|
Add support for GDAL creation options for output tiles
Output tiles are written by GDAL, remember.
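
For context, a minimal sketch of the mechanism the patch uses: creation options are merged into the keyword arguments passed to rasterio's writer (`kwds.update(**creation_options)` in `mbtiles/worker.py`). GTiff is used below only so the example is self-contained; in rio-mbtiles the same kwargs flow into the JPEG/PNG/WebP tile writes.

```python
# Sketch: GDAL creation options riding along as rasterio writer kwargs.
import numpy as np
from rasterio.io import MemoryFile

creation_options = {"compress": "lzw"}  # e.g. parsed from `--co COMPRESS=LZW`

profile = {"driver": "GTiff", "width": 256, "height": 256, "count": 3,
           "dtype": "uint8"}
profile.update(**creation_options)  # merged exactly as the patch does

with MemoryFile() as memfile:
    with memfile.open(**profile) as dst:
        dst.write(np.zeros((3, 256, 256), dtype="uint8"))
```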
|
0.0
|
fad1e19f35bfdbb34f38563a6c2f638ce5c225ca
|
[
"tests/test_cli.py::test_cli_help"
] |
[
"tests/test_cli.py::test_dst_nodata_validation",
"tests/test_cli.py::test_export_metadata[RGB.byte.tif]",
"tests/test_cli.py::test_export_metadata[RGBA.byte.tif]",
"tests/test_cli.py::test_export_overwrite",
"tests/test_cli.py::test_export_metadata_output_opt",
"tests/test_cli.py::test_export_tiles",
"tests/test_cli.py::test_export_zoom",
"tests/test_cli.py::test_export_jobs",
"tests/test_cli.py::test_export_src_nodata",
"tests/test_cli.py::test_export_dump",
"tests/test_cli.py::test_export_tile_size[256]",
"tests/test_cli.py::test_export_tile_size[512]",
"tests/test_cli.py::test_export_bilinear",
"tests/test_cli.py::test_skip_empty",
"tests/test_cli.py::test_invalid_format_rgba",
"tests/test_cli.py::test_rgba_png[RGBA.byte.tif]",
"tests/test_cli.py::test_export_count[cf-4-10-70-RGB.byte.tif]",
"tests/test_cli.py::test_export_count[cf-6-7-6-RGB.byte.tif]",
"tests/test_cli.py::test_export_count[cf-4-10-12-rgb-193f513.vrt]",
"tests/test_cli.py::test_export_count[mp-4-10-70-RGB.byte.tif]",
"tests/test_cli.py::test_export_count[mp-6-7-6-RGB.byte.tif]",
"tests/test_cli.py::test_export_count[mp-4-10-12-rgb-193f513.vrt]",
"tests/test_cli.py::test_progress_bar[cf-RGBA.byte.tif]",
"tests/test_cli.py::test_progress_bar[mp-RGBA.byte.tif]",
"tests/test_cli.py::test_appending_export_count[cf-4-10-70-sources0]",
"tests/test_cli.py::test_appending_export_count[mp-4-10-70-sources0]",
"tests/test_cli.py::test_input_required[inputfiles0]",
"tests/test_cli.py::test_input_required[inputfiles1]",
"tests/test_cli.py::test_append_or_overwrite_required",
"tests/test_cli.py::test_cutline_progress_bar[cf-RGBA.byte.tif]",
"tests/test_cli.py::test_cutline_progress_bar[mp-RGBA.byte.tif]",
"tests/test_cli.py::test_invalid_cutline[cf-RGBA.byte.tif]",
"tests/test_cli.py::test_invalid_cutline[mp-RGBA.byte.tif]",
"tests/test_cli.py::test_covers[RGB.byte.tif-0320-4..4-1]",
"tests/test_cli.py::test_covers[RGB.byte.tif-032022-6..6-0]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-02 22:18:27+00:00
|
mit
| 3,705 |
|
mapbox__tilesets-cli-97
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b19be25..a363530 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,8 @@
# Unreleased
+# 1.4.1 (2020-08-06)
+- Use the `/jobs` endpoint in the `status` command
+
# 1.4.0 (2020-08-04)
- Create upload-source command to replace add-source, with extra `--replace` option
diff --git a/README.md b/README.md
index 1fab177..6628b5d 100644
--- a/README.md
+++ b/README.md
@@ -264,7 +264,7 @@ Flags:
### status
-View the status of a tileset. This includes how many jobs are queued, processing, and complete.
+View the status of the most recent job for a tileset. To get more detailed information about a tileset's jobs, including the timestamps of failed and successful jobs, use the `tilesets jobs <tileset_id>` command.
```
tilesets status <tileset_id>
diff --git a/mapbox_tilesets/__init__.py b/mapbox_tilesets/__init__.py
index ec7e72a..43d023b 100644
--- a/mapbox_tilesets/__init__.py
+++ b/mapbox_tilesets/__init__.py
@@ -1,3 +1,3 @@
"""mapbox_tilesets package"""
-__version__ = "1.4.0"
+__version__ = "1.4.1"
diff --git a/mapbox_tilesets/scripts/cli.py b/mapbox_tilesets/scripts/cli.py
index 07eb99a..6fbecc8 100644
--- a/mapbox_tilesets/scripts/cli.py
+++ b/mapbox_tilesets/scripts/cli.py
@@ -232,12 +232,18 @@ def status(tileset, token=None, indent=None):
mapbox_api = utils._get_api()
mapbox_token = utils._get_token(token)
s = utils._get_session()
- url = "{0}/tilesets/v1/{1}/status?access_token={2}".format(
+ url = "{0}/tilesets/v1/{1}/jobs?limit=1&access_token={2}".format(
mapbox_api, tileset, mapbox_token
)
r = s.get(url)
- click.echo(json.dumps(r.json(), indent=indent))
+ status = {}
+ for job in r.json():
+ status["id"] = job["tilesetId"]
+ status["latest_job"] = job["id"]
+ status["status"] = job["stage"]
+
+ click.echo(json.dumps(status, indent=indent))
@cli.command("tilejson")
|
mapbox/tilesets-cli
|
9ba2d4b2c9c024d503ecad814517cde99d83cbc3
|
diff --git a/tests/test_cli_status.py b/tests/test_cli_status.py
index 7cdb53c..c032f89 100644
--- a/tests/test_cli_status.py
+++ b/tests/test_cli_status.py
@@ -13,27 +13,37 @@ def test_cli_status(mock_request_get, MockResponse):
runner = CliRunner()
# sends expected request
- message = {"message": "mock message"}
+ message = [{"id": "a123", "stage": "processing", "tilesetId": "test.id"}]
mock_request_get.return_value = MockResponse(message)
result = runner.invoke(status, ["test.id"])
mock_request_get.assert_called_with(
- "https://api.mapbox.com/tilesets/v1/test.id/status?access_token=fake-token"
+ "https://api.mapbox.com/tilesets/v1/test.id/jobs?limit=1&access_token=fake-token"
)
assert result.exit_code == 0
- assert json.loads(result.output) == message
+ expected_status = {
+ "id": "test.id",
+ "status": "processing",
+ "latest_job": "a123",
+ }
+ assert json.loads(result.output) == expected_status
@pytest.mark.usefixtures("token_environ")
@mock.patch("requests.Session.get")
def test_cli_status_use_token_flag(mock_request_get, MockResponse):
runner = CliRunner()
- message = {"message": "mock message"}
+ message = [{"id": "a123", "stage": "processing", "tilesetId": "test.id"}]
mock_request_get.return_value = MockResponse(message)
# Provides the flag --token
result = runner.invoke(status, ["test.id", "--token", "flag-token"])
mock_request_get.assert_called_with(
- "https://api.mapbox.com/tilesets/v1/test.id/status?access_token=flag-token"
+ "https://api.mapbox.com/tilesets/v1/test.id/jobs?limit=1&access_token=flag-token"
)
assert result.exit_code == 0
- assert json.loads(result.output) == {"message": "mock message"}
+ expected_status = {
+ "id": "test.id",
+ "status": "processing",
+ "latest_job": "a123",
+ }
+ assert json.loads(result.output) == expected_status
|
Modify `status` command to rely on the `/jobs` API endpoint
Now that the `/jobs` endpoint supports `limit`, we can switch the `tileset status` command to rely on the `/jobs` endpoint under the hood. We can also add more information to the output of `tileset status`, like the [latest modified timestamp](https://github.com/mapbox/tilesets-cli/issues/81) and any warnings associated with the latest job.
cc/ @dianeschulze
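
The patch implements this with a `limit=1` query against `/jobs`. A minimal sketch of the reshaping it performs (`latest_status` is a hypothetical helper name; token handling and error checks are omitted):

```python
# Sketch: fetch the most recent job and report it as the tileset status.
import json

import requests


def latest_status(tileset, token):
    url = (
        "https://api.mapbox.com/tilesets/v1/"
        f"{tileset}/jobs?limit=1&access_token={token}"
    )
    status = {}
    for job in requests.get(url).json():  # at most one job with limit=1
        status["id"] = job["tilesetId"]
        status["latest_job"] = job["id"]
        status["status"] = job["stage"]
    return json.dumps(status, indent=2)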
|
0.0
|
9ba2d4b2c9c024d503ecad814517cde99d83cbc3
|
[
"tests/test_cli_status.py::test_cli_status",
"tests/test_cli_status.py::test_cli_status_use_token_flag"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-04 23:28:36+00:00
|
bsd-2-clause
| 3,706 |
|
mapbox__untiler-20
|
diff --git a/untiler/__init__.py b/untiler/__init__.py
index 4191f7f..2e453dc 100644
--- a/untiler/__init__.py
+++ b/untiler/__init__.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
from __future__ import with_statement
from __future__ import print_function
+from __future__ import division
import os
from multiprocessing import Pool
@@ -137,7 +138,8 @@ def logwriter(openLogFile, writeObj):
def streaming_tile_worker(data):
size = 2 ** (data['zMax'] - globalArgs['compositezoom']) * globalArgs['tileResolution']
out_meta = make_src_meta(merc.bounds(data['x'], data['y'], data['z']), size, globalArgs['creation_opts'])
- filename = globalArgs['sceneTemplate'] % (data['z'], data['x'], data['y'])
+ z, x, y = [int(i) for i in (data['z'], data['x'], data['y'])]
+ filename = globalArgs['sceneTemplate'] % (z, x, y)
subtiler = tile_utils.TileUtils()
log = 'FILE: %s\n' % filename
try:
@@ -158,7 +160,7 @@ def streaming_tile_worker(data):
print('filling')
## Read and write the fill tiles first
for t in subtiler.get_fill_super_tiles(superTiles, data['maxCovTiles'], fThresh):
- z, x, y = t
+ z, x, y = [int(i) for i in t]
path = globalArgs['readTemplate'] % (z, x, y)
log += '%s %s %s\n' % (z, x, y)
@@ -176,7 +178,7 @@ def streaming_tile_worker(data):
baseX, baseY = subtiler.get_sub_base_zoom(data['x'], data['y'], data['z'], data['zMax'])
for t in data['zMaxTiles']:
- z, x, y = t
+ z, x, y = [int(i) for i in t]
path = globalArgs['readTemplate'] % (z, x, y)
log += '%s %s %s\n' % (z, x, y)
diff --git a/untiler/scripts/mbtiles_extract.py b/untiler/scripts/mbtiles_extract.py
index ea942d4..9d63a8a 100644
--- a/untiler/scripts/mbtiles_extract.py
+++ b/untiler/scripts/mbtiles_extract.py
@@ -6,12 +6,12 @@ from mbutil import mbtiles_to_disk
import contextlib
import sys
-import cStringIO
+from io import StringIO
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
- sys.stdout = cStringIO.StringIO()
+ sys.stdout = StringIO()
yield
sys.stdout = save_stdout
diff --git a/untiler/scripts/tile_utils.py b/untiler/scripts/tile_utils.py
index 9a599a4..3620675 100644
--- a/untiler/scripts/tile_utils.py
+++ b/untiler/scripts/tile_utils.py
@@ -1,3 +1,4 @@
+from __future__ import division
import numpy as np
import re
from collections import OrderedDict
@@ -46,7 +47,7 @@ class TileUtils:
if np.any(subTiles[:, 0] < zoom):
raise ValueError("Cannot get super tiles of tile array w/ smaller zoom")
zoomdiffs = 2 ** (subTiles[:, 0] - zoom)
- superTiles = subTiles / np.vstack(zoomdiffs)
+ superTiles = subTiles // np.vstack(zoomdiffs)
superTiles[:,0] = zoom
return superTiles
@@ -154,4 +155,4 @@ def parse_template(template):
if __name__ == "__main__":
TileUtils()
- parse_template()
\ No newline at end of file
+ parse_template()
|
mapbox/untiler
|
e786e92d0c14ff214e153261083129ef7a51bf04
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index d00abcf..41dc1d7 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,24 +1,34 @@
-from click.testing import CliRunner
+import os
+import shutil
+import uuid
-from untiler.scripts.cli import cli
-import os, shutil, mercantile, pytest
+from click.testing import CliRunner
+import mercantile
import numpy as np
+import pytest
import rasterio as rio
+from untiler.scripts.cli import cli
+
class TestTiler:
- def __init__(self, path):
- self.path = path
- self.cleanup()
+ def __init__(self):
+ self.path = '/tmp/test-untiler-' + str(uuid.uuid4())
os.mkdir(self.path)
self.imgs = ['tests/fixtures/fill_img.jpg', 'tests/fixtures/fill_img_grey.jpg']
- def cleanup(self):
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
try:
- shutil.rmtree(self.path)
+ self.cleanup()
except:
pass
+ def cleanup(self):
+ shutil.rmtree(self.path)
+
def add_tiles(self, zMin, zMax):
zooms = np.arange(zMax - zMin + 2) + zMin - 1
@@ -30,9 +40,9 @@ class TestTiler:
if not os.path.isdir(basepath):
os.mkdir(basepath)
- for i in xrange(1, len(zooms)):
+ for i in range(1, len(zooms)):
tiles = []
- os.mkdir("%s/%s" % (basepath, zooms[i]))
+ os.mkdir("%s/%s" % (basepath, zooms[i]))
for t in obj[zooms[i - 1]]:
for tt in mercantile.children(t):
tiles.append(tt)
@@ -45,107 +55,103 @@ class TestTiler:
"%s/%s/%s/%s.jpg" % (basepath, zooms[i], tt.x, tt.y))
obj[zooms[i]] = tiles
-def test_cli_streamdir_all_ok():
- testtiles = TestTiler('/tmp/test-untiler')
- testtiles.add_tiles(15, 19)
- runner = CliRunner()
-
- result = runner.invoke(cli, ['streamdir', '/tmp/test-untiler', '/tmp/test-untiler', '-c', '14'])
-
- assert result.output.rstrip() == '/tmp/test-untiler/14-2621-6348-tile.tif'
- with rio.open(result.output.rstrip()) as src:
- assert src.shape == (8192, 8192)
- assert src.count == 4
+def test_cli_streamdir_all_ok():
+ with TestTiler() as testtiles:
+ testtiles.add_tiles(15, 18)
+ tmp = testtiles.path
+ runner = CliRunner()
+ result = runner.invoke(cli, ['streamdir', tmp, tmp, '-c', '14'])
+ assert result.output.rstrip() == os.path.join(tmp, '14-2621-6348-tile.tif')
+ with rio.open(result.output.rstrip()) as src:
+ assert src.shape == (4096, 4096) # matches z18
+ assert src.count == 4
- testtiles.cleanup()
def test_cli_streamdir_mixed_ok():
- testtiles = TestTiler('/tmp/test-untiler')
- testtiles.add_tiles(15, 16)
- testtiles.add_tiles(17, 19)
- runner = CliRunner()
+ with TestTiler() as testtiles:
+ testtiles.add_tiles(15, 16)
+ testtiles.add_tiles(17, 18)
+ tmp = testtiles.path
+ runner = CliRunner()
+ result = runner.invoke(cli, ['streamdir', tmp, tmp, '-c', '14'])
+ assert result.output.rstrip() == os.path.join(tmp, '14-2621-6348-tile.tif')
- result = runner.invoke(cli, ['streamdir', '/tmp/test-untiler', '/tmp/test-untiler', '-c', '14'])
-
- assert result.output.rstrip() == '/tmp/test-untiler/14-2621-6348-tile.tif'
+ with rio.open(result.output.rstrip()) as src:
+ assert src.shape == (4096, 4096) # matches z18
+ assert src.count == 4
- with rio.open(result.output.rstrip()) as src:
- assert src.shape == (8192, 8192)
- assert src.count == 4
- testtiles.cleanup()
+def test_cli_streamdir_mixed_ok_poo():
+ with TestTiler() as testtiles:
+ testtiles.add_tiles(15, 16)
+ tmp = testtiles.path
+ runner = CliRunner()
+ result = runner.invoke(cli, ['streamdir', tmp, tmp, '-c', '14', '-t', 'poo/{z}/{z}/{z}.jpg'])
+ assert result.exit_code == -1
-def test_cli_streamdir_mixed_ok():
- testtiles = TestTiler('/tmp/test-untiler')
- testtiles.add_tiles(15, 16)
- runner = CliRunner()
-
- result = runner.invoke(cli, ['streamdir', '/tmp/test-untiler', '/tmp/test-untiler', '-c', '14', '-t', 'poo/{z}/{z}/{z}.jpg'])
-
- assert result.exit_code == -1
-
- testtiles.cleanup()
def test_cli_baddir_fails():
- rdir = '/tmp' + ''.join(np.random.randint(0,9,10).astype(str))
+ rdir = '/tmp/this/does/not.exist'
runner = CliRunner()
-
result = runner.invoke(cli, ['streamdir', rdir, rdir, '-c', '14'])
-
assert result.exit_code == 2
+
def test_cli_badoutput_fails():
- pdir = '/tmp/' + ''.join(np.random.randint(0,9,10).astype(str))
- rdir = '/tmp/' + ''.join(np.random.randint(0,9,10).astype(str))
+ pdir = '/tmp/test-untiler-' + str(uuid.uuid4())
+ rdir = '/tmp/test-untiler-' + str(uuid.uuid4())
os.mkdir(pdir)
runner = CliRunner()
-
result = runner.invoke(cli, ['streamdir', pdir, rdir, '-c', '14'])
-
assert result.exit_code == 2
+ try:
+ shutil.rmtree(pdir)
+ shutil.rmtree(rdir)
+ except:
+ pass
- shutil.rmtree(pdir)
def test_diff_zooms():
- testtiles = TestTiler('/tmp/test-untiler')
- testtiles.add_tiles(15, 16)
- testtiles.add_tiles(17, 18)
- runner = CliRunner()
-
- result = runner.invoke(cli, ['streamdir', '/tmp/test-untiler', '/tmp/test-untiler', '-c', '15'])
+ with TestTiler() as testtiles:
+ testtiles.add_tiles(15, 16)
+ testtiles.add_tiles(17, 18)
+ tmp = testtiles.path
+ runner = CliRunner()
- expected_scenes = '/tmp/test-untiler/15-5242-12696-tile.tif\n/tmp/test-untiler/15-5243-12696-tile.tif\n/tmp/test-untiler/15-5243-12697-tile.tif\n/tmp/test-untiler/15-5242-12697-tile.tif\n'
+ runner.invoke(cli, ['streamdir', tmp, tmp, '-c', '15'])
- with rio.open('/tmp/test-untiler/15-5242-12697-tile.tif') as src:
- assert src.shape == (2048, 2048)
- assert src.count == 4
+ with rio.open(os.path.join(tmp, '15-5242-12697-tile.tif')) as src:
+ assert src.shape == (2048, 2048)
+ assert src.count == 4
- with rio.open('/tmp/test-untiler/15-5242-12696-tile.tif') as src:
- assert src.shape == (512, 512)
- assert src.count == 4
+ with rio.open(os.path.join(tmp, '15-5242-12696-tile.tif')) as src:
+ assert src.shape == (512, 512)
+ assert src.count == 4
- testtiles.cleanup()
def test_extract_mbtiles():
- testpath = '/tmp/' + ''.join(np.random.randint(0,9,10).astype(str))
- testmbtiles = os.path.join(os.path.dirname(__file__), 'fixtures/testtiles.mbtiles')
- os.mkdir(testpath)
- runner = CliRunner()
- result = runner.invoke(cli, ['streammbtiles', testmbtiles, testpath, '-z', '16', '-x', '-s', '{z}-{x}-{y}-mbtiles.tif', '--co', 'compress=lzw'])
- assert result.exit_code == 0
- expected_checksums = [[13858, 8288, 51489, 31223], [17927, 52775, 411, 9217]]
- for o, c in zip(result.output.rstrip().split('\n'), expected_checksums):
- with rio.open(o) as src:
- checksums = [src.checksum(i) for i in src.indexes]
- assert checksums == c
- shutil.rmtree(testpath)
+ with TestTiler() as tt:
+ testpath = tt.path
+ testmbtiles = os.path.join(os.path.dirname(__file__), 'fixtures/testtiles.mbtiles')
+ runner = CliRunner()
+ result = runner.invoke(cli, [
+ 'streammbtiles', testmbtiles, testpath, '-z', '16', '-x', '-s',
+ '{z}-{x}-{y}-mbtiles.tif', '--co', 'compress=lzw'])
+ assert result.exit_code == 0
+ expected_checksums = [[13858, 8288, 51489, 31223], [17927, 52775, 411, 9217]]
+ for o, c in zip(result.output.rstrip().split('\n'), expected_checksums):
+ with rio.open(o) as src:
+ checksums = [src.checksum(i) for i in src.indexes]
+ assert checksums == c
+
def test_extract_mbtiles_fails():
- testpath = '/tmp/' + ''.join(np.random.randint(0,9,10).astype(str))
- testmbtiles = os.path.join(os.path.dirname(__file__), 'fixtures/bad.mbtiles')
- os.mkdir(testpath)
- runner = CliRunner()
- result = runner.invoke(cli, ['streammbtiles', testmbtiles, testpath, '-z', '16', '-x', '-s', '{z}-{x}-{y}-mbtiles.tif', '--co', 'compress=lzw'])
- assert result.exit_code == -1
- shutil.rmtree(testpath)
\ No newline at end of file
+ with TestTiler() as tt:
+ testpath = tt.path
+ testmbtiles = os.path.join(os.path.dirname(__file__), 'fixtures/bad.mbtiles')
+ runner = CliRunner()
+ result = runner.invoke(cli, [
+ 'streammbtiles', testmbtiles, testpath, '-z', '16', '-x', '-s',
+ '{z}-{x}-{y}-mbtiles.tif', '--co', 'compress=lzw'])
+ assert result.exit_code == -1
diff --git a/tests/test_untiler_funcs.py b/tests/test_untiler_funcs.py
index 8cf433c..9a0cf25 100644
--- a/tests/test_untiler_funcs.py
+++ b/tests/test_untiler_funcs.py
@@ -1,15 +1,18 @@
#!/usr/bin/env python
+import json
+import pickle
+import os
import pytest
-import untiler
-from untiler.scripts import tile_utils
-
import numpy as np
-import json, pickle, os
import mercantile as merc
import inspect
import rasterio
+import untiler
+from untiler.scripts import tile_utils
+
+
def test_templating_good_jpg():
print("")
expectedMatch = 'tarbase/jpg/\d+/\d+/\d+.jpg'
@@ -170,7 +173,7 @@ def test_affine():
@pytest.fixture
def expectedMeta():
- with open('tests/expected/src_meta.pkl') as pklfile:
+ with open('tests/expected/src_meta.pkl', mode='rb') as pklfile:
return pickle.load(pklfile)
|
Python 3.6 compatibility
A few things to change (that I see now):
- A few integer division changes: https://github.com/mapbox/untiler/blob/master/untiler/scripts/tile_utils.py#L49
- Bigger problem: `mbutil` is a lot farther from Python 3.5 compatibility (https://github.com/mapbox/untiler/blob/master/untiler/scripts/mbtiles_extract.py#L5). I think removing it entirely and reading tiles using sqlite3 is preferable (see the sketch below).
cc @perrygeo
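
A minimal sketch of the sqlite3 approach, assuming the standard MBTiles schema (a `tiles` table with `zoom_level`, `tile_column`, `tile_row`, `tile_data`; rows are stored TMS-style per the spec):

```python
# Sketch: read tiles straight from an .mbtiles file with sqlite3 instead
# of going through mbutil. tile_row is TMS, so it is flipped to XYZ here.
import sqlite3


def iter_tiles(path):
    conn = sqlite3.connect(path)
    try:
        cur = conn.execute(
            "SELECT zoom_level, tile_column, tile_row, tile_data FROM tiles"
        )
        for z, x, tms_y, data in cur:
            y = (2 ** z - 1) - tms_y  # TMS -> XYZ row
            yield z, x, y, data
    finally:
        conn.close()
```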
|
0.0
|
e786e92d0c14ff214e153261083129ef7a51bf04
|
[
"tests/test_cli.py::test_cli_baddir_fails",
"tests/test_cli.py::test_cli_badoutput_fails",
"tests/test_untiler_funcs.py::test_templating_fails",
"tests/test_untiler_funcs.py::tests_templating_scene_template_fails",
"tests/test_untiler_funcs.py::tests_templating_scene_template_separator_fails",
"tests/test_untiler_funcs.py::test_parse_tiles",
"tests/test_untiler_funcs.py::test_get_xys",
"tests/test_untiler_funcs.py::test_get_xys_invalid_tiles",
"tests/test_untiler_funcs.py::test_get_xys_invalid_zoom",
"tests/test_untiler_funcs.py::test_affine",
"tests/test_untiler_funcs.py::test_src_meta_making",
"tests/test_untiler_funcs.py::test_make_window",
"tests/test_untiler_funcs.py::test_make_window_fails",
"tests/test_untiler_funcs.py::test_upsampling",
"tests/test_untiler_funcs.py::test_affaux",
"tests/test_untiler_funcs.py::test_make_grey_imagedata",
"tests/test_untiler_funcs.py::test_make_rgb_imagedata",
"tests/test_untiler_funcs.py::test_load_imagedata_rgb",
"tests/test_untiler_funcs.py::test_load_imagedata_grey",
"tests/test_untiler_funcs.py::test_make_grey_depth2_fails",
"tests/test_untiler_funcs.py::test_load_imagedata_random",
"tests/test_untiler_funcs.py::test_load_imagedata_fails",
"tests/test_untiler_funcs.py::test_create_supertiles",
"tests/test_untiler_funcs.py::test_create_supertiles_fails",
"tests/test_untiler_funcs.py::test_find_unique_tiles",
"tests/test_untiler_funcs.py::test_find_zoom_tiles",
"tests/test_untiler_funcs.py::test_find_zoom_tiles_fail",
"tests/test_untiler_funcs.py::test_find_zoom_tiles_floor_fail",
"tests/test_untiler_funcs.py::test_find_zoom_tiles_floor",
"tests/test_untiler_funcs.py::test_logger"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-05-02 00:08:22+00:00
|
mit
| 3,707 |
|
mapbox__untiler-28
|
diff --git a/.travis.yml b/.travis.yml
index 96731bd..1a637a5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,54 +3,27 @@ language: python
sudo: false
cache:
- # Apparently if you override the install command that silently disables the
- # cache: pip support. This is less than ideal and I've opened up
- # travis-ci/travis-ci#3239 to hopefully get that addressed. For now I'll
- # manually add the pip cache directory to the build cache.
directories:
- ~/.cache/pip
env:
global:
- # These two environment variables could be set by Travis itself, or Travis
- # could configure itself in /etc/, ~/, or inside of the virtual
- # environments. In any case if these two values get configured then end
- # users only need to enable the pip cache and manually run pip wheel before
- # running pip install.
- PIP_WHEEL_DIR=$HOME/.cache/pip/wheels
- PIP_FIND_LINKS=file://$HOME/.cache/pip/wheels
python:
- - "2.7"
- "3.6"
-
-addons:
- apt:
- packages:
- - libgdal1h
- - gdal-bin
- - libgdal-dev
- - libatlas-dev
- - libatlas-base-dev
- - liblapack-dev
- - gfortran
- - libgmp-dev
- - libmpfr-dev
+ - "3.7"
before_install:
- pip install -U pip
- - pip install wheel
+ - pip install -r requirements.txt
install:
- - "pip wheel -r requirements.txt"
- # Actually install our dependencies now, this will pull from the directory
- # that the first command placed the Wheels into.
- - "pip install --no-binary rasterio -r requirements.txt"
- - "pip install coveralls"
- - "pip install -e .[test]"
+ - pip install -e .[test]
script:
- - py.test --cov untiler --cov-report term-missing
+ - python -m pytest --cov untiler --cov-report term-missing
after_success:
- - coveralls
+ - coveralls
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index d5a62da..7b32ce5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
-rasterio==1.0a8
+rasterio==1.1.2
mercantile
mbutil
diff --git a/untiler/scripts/tile_utils.py b/untiler/scripts/tile_utils.py
index 3620675..486f1ea 100644
--- a/untiler/scripts/tile_utils.py
+++ b/untiler/scripts/tile_utils.py
@@ -148,7 +148,7 @@ def parse_template(template):
if len(separator) != 2 or separator[0] != separator[1]:
raise ValueError('Too many / not matching separators!')
- return valPattern.sub('\d+', template), valPattern.sub('%s', template), separator[0]
+ return valPattern.sub(r"\\d+", template), valPattern.sub("%s", template), separator[0]
else:
raise ValueError('Invalid template "%s"' % (template))
|
mapbox/untiler
|
6f1fe9c9cedb8701124c4b9f78712ea9de4d8a14
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 41dc1d7..5ab5c93 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -7,7 +7,7 @@ import mercantile
import numpy as np
import pytest
import rasterio as rio
-
+import sqlite3
from untiler.scripts.cli import cli
@@ -88,7 +88,8 @@ def test_cli_streamdir_mixed_ok_poo():
tmp = testtiles.path
runner = CliRunner()
result = runner.invoke(cli, ['streamdir', tmp, tmp, '-c', '14', '-t', 'poo/{z}/{z}/{z}.jpg'])
- assert result.exit_code == -1
+
+ assert result.exc_info[0] == ValueError
def test_cli_baddir_fails():
@@ -139,11 +140,15 @@ def test_extract_mbtiles():
'streammbtiles', testmbtiles, testpath, '-z', '16', '-x', '-s',
'{z}-{x}-{y}-mbtiles.tif', '--co', 'compress=lzw'])
assert result.exit_code == 0
+
expected_checksums = [[13858, 8288, 51489, 31223], [17927, 52775, 411, 9217]]
- for o, c in zip(result.output.rstrip().split('\n'), expected_checksums):
+ checksums = []
+
+ for o in result.output.rstrip().split('\n'):
with rio.open(o) as src:
- checksums = [src.checksum(i) for i in src.indexes]
- assert checksums == c
+ checksums.append([src.checksum(i) for i in src.indexes])
+
+ assert sorted(checksums) == sorted(expected_checksums)
def test_extract_mbtiles_fails():
@@ -151,7 +156,9 @@ def test_extract_mbtiles_fails():
testpath = tt.path
testmbtiles = os.path.join(os.path.dirname(__file__), 'fixtures/bad.mbtiles')
runner = CliRunner()
+
result = runner.invoke(cli, [
'streammbtiles', testmbtiles, testpath, '-z', '16', '-x', '-s',
'{z}-{x}-{y}-mbtiles.tif', '--co', 'compress=lzw'])
- assert result.exit_code == -1
+
+ assert result.exc_info[0] == sqlite3.OperationalError
diff --git a/tests/test_untiler_funcs.py b/tests/test_untiler_funcs.py
index 9a0cf25..a5d30bf 100644
--- a/tests/test_untiler_funcs.py
+++ b/tests/test_untiler_funcs.py
@@ -105,7 +105,7 @@ def expectedTileList():
return np.array(json.load(ofile))
def test_parse_tiles(inputTilenames, expectedTileList):
- matchTemplate = '3857_9_83_202_20130517_242834/jpg/\d+/\d+/\d+.jpg'
+ matchTemplate = r'3857_9_83_202_20130517_242834/jpg/\d+/\d+/\d+.jpg'
tiler = tile_utils.TileUtils()
|
Tests broken on master
Tests fail on Python 3.6 - see [this Travis run](https://travis-ci.org/mapbox/untiler/builds/229187081).
<details>
<summary>Failure</summary>
<pre>
def test_extract_mbtiles():
with TestTiler() as tt:
testpath = tt.path
testmbtiles = os.path.join(os.path.dirname(__file__), 'fixtures/testtiles.mbtiles')
runner = CliRunner()
result = runner.invoke(cli, [
'streammbtiles', testmbtiles, testpath, '-z', '16', '-x', '-s',
'{z}-{x}-{y}-mbtiles.tif', '--co', 'compress=lzw'])
assert result.exit_code == 0
expected_checksums = [[13858, 8288, 51489, 31223], [17927, 52775, 411, 9217]]
for o, c in zip(result.output.rstrip().split('\n'), expected_checksums):
with rio.open(o) as src:
checksums = [src.checksum(i) for i in src.indexes]
> assert checksums == c
E assert [17927, 52775, 411, 9217] == [13858, 8288, 51489, 31223]
E At index 0 diff: 17927 != 13858
E Use -v to get the full diff
</pre>
</details>
<br />
Noticed this while working on the [autodeploy branch](https://github.com/mapbox/untiler/tree/autodeploy). I initially thought the failures were due to changes I'd made, but I think this is some sort of config/dependency rot happening independently of my changes.
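
The accompanying test patch sidesteps the failure by comparing sorted checksum lists, presumably because the order in which output files are produced is not deterministic. A minimal sketch of that comparison:

```python
# Sketch: order-insensitive comparison as done in the test patch, since
# per-file checksums can come back in any order.
expected = [[13858, 8288, 51489, 31223], [17927, 52775, 411, 9217]]
observed = [[17927, 52775, 411, 9217], [13858, 8288, 51489, 31223]]
assert sorted(observed) == sorted(expected)
```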
|
0.0
|
6f1fe9c9cedb8701124c4b9f78712ea9de4d8a14
|
[
"tests/test_cli.py::test_cli_streamdir_all_ok",
"tests/test_cli.py::test_cli_streamdir_mixed_ok",
"tests/test_cli.py::test_diff_zooms",
"tests/test_cli.py::test_extract_mbtiles",
"tests/test_untiler_funcs.py::test_templating_good_jpg",
"tests/test_untiler_funcs.py::test_templating_good_png",
"tests/test_untiler_funcs.py::tests_templating_scene_template",
"tests/test_untiler_funcs.py::tests_templating_scene_template_numeric"
] |
[
"tests/test_cli.py::test_cli_streamdir_mixed_ok_poo",
"tests/test_cli.py::test_cli_baddir_fails",
"tests/test_cli.py::test_cli_badoutput_fails",
"tests/test_cli.py::test_extract_mbtiles_fails",
"tests/test_untiler_funcs.py::test_templating_fails",
"tests/test_untiler_funcs.py::tests_templating_scene_template_fails",
"tests/test_untiler_funcs.py::tests_templating_scene_template_separator_fails",
"tests/test_untiler_funcs.py::test_parse_tiles",
"tests/test_untiler_funcs.py::test_get_xys",
"tests/test_untiler_funcs.py::test_get_xys_invalid_tiles",
"tests/test_untiler_funcs.py::test_get_xys_invalid_zoom",
"tests/test_untiler_funcs.py::test_affine",
"tests/test_untiler_funcs.py::test_src_meta_making",
"tests/test_untiler_funcs.py::test_make_window",
"tests/test_untiler_funcs.py::test_make_window_fails",
"tests/test_untiler_funcs.py::test_upsampling",
"tests/test_untiler_funcs.py::test_affaux",
"tests/test_untiler_funcs.py::test_make_grey_imagedata",
"tests/test_untiler_funcs.py::test_make_rgb_imagedata",
"tests/test_untiler_funcs.py::test_load_imagedata_rgb",
"tests/test_untiler_funcs.py::test_load_imagedata_grey",
"tests/test_untiler_funcs.py::test_make_grey_depth2_fails",
"tests/test_untiler_funcs.py::test_load_imagedata_random",
"tests/test_untiler_funcs.py::test_load_imagedata_fails",
"tests/test_untiler_funcs.py::test_create_supertiles",
"tests/test_untiler_funcs.py::test_create_supertiles_fails",
"tests/test_untiler_funcs.py::test_find_unique_tiles",
"tests/test_untiler_funcs.py::test_find_zoom_tiles",
"tests/test_untiler_funcs.py::test_find_zoom_tiles_fail",
"tests/test_untiler_funcs.py::test_find_zoom_tiles_floor_fail",
"tests/test_untiler_funcs.py::test_find_zoom_tiles_floor",
"tests/test_untiler_funcs.py::test_logger"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-20 16:21:58+00:00
|
mit
| 3,708 |
|
mapping-commons__sssom-py-341
|
diff --git a/setup.cfg b/setup.cfg
index 595b94a..f61ded4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -49,6 +49,7 @@ keywords =
[options]
install_requires =
+ sqlalchemy<2.0.0
pyparsing
bioregistry
click
diff --git a/sssom/io.py b/sssom/io.py
index 69bf759..b47cd2a 100644
--- a/sssom/io.py
+++ b/sssom/io.py
@@ -141,7 +141,6 @@ def split_file(input_path: str, output_directory: Union[str, Path]) -> None:
def _get_prefix_map(metadata: Metadata, prefix_map_mode: str = None):
-
if prefix_map_mode is None:
prefix_map_mode = PREFIX_MAP_MODE_METADATA_ONLY
diff --git a/sssom/parsers.py b/sssom/parsers.py
index 99abc60..3c29adb 100644
--- a/sssom/parsers.py
+++ b/sssom/parsers.py
@@ -307,10 +307,11 @@ def _init_mapping_set(meta: Optional[MetadataType]) -> MappingSet:
def _get_mdict_ms_and_bad_attrs(
row: pd.Series, ms: MappingSet, bad_attrs: Counter
) -> Tuple[dict, MappingSet, Counter]:
-
mdict = {}
sssom_schema_object = (
- SSSOMSchemaView.instance if SSSOMSchemaView.instance else SSSOMSchemaView()
+ SSSOMSchemaView.instance
+ if hasattr(SSSOMSchemaView, "instance")
+ else SSSOMSchemaView()
)
for k, v in row.items():
if v and v == v:
diff --git a/sssom/util.py b/sssom/util.py
index a336950..87ed0ae 100644
--- a/sssom/util.py
+++ b/sssom/util.py
@@ -400,15 +400,16 @@ def assign_default_confidence(
"""
# Get rows having numpy.NaN as confidence
if df is not None:
- if CONFIDENCE not in df.columns:
- df[CONFIDENCE] = np.NaN
- nan_df = pd.DataFrame(columns=df.columns)
+ new_df = df.copy()
+ if CONFIDENCE not in new_df.columns:
+ new_df[CONFIDENCE] = np.NaN
+ nan_df = pd.DataFrame(columns=new_df.columns)
else:
- df = df[~df[CONFIDENCE].isna()]
+ new_df = df[~df[CONFIDENCE].isna()]
nan_df = df[df[CONFIDENCE].isna()]
else:
ValueError("DataFrame cannot be empty to 'assign_default_confidence'.")
- return df, nan_df
+ return new_df, nan_df
def remove_unmatched(df: pd.DataFrame) -> pd.DataFrame:
@@ -1496,7 +1497,6 @@ def are_params_slots(params: dict) -> bool:
def _get_sssom_schema_object() -> SSSOMSchemaView:
-
sssom_sv_object = (
SSSOMSchemaView.instance
if hasattr(SSSOMSchemaView, "instance")
diff --git a/tox.ini b/tox.ini
index e8518b8..9b4caff 100644
--- a/tox.ini
+++ b/tox.ini
@@ -34,7 +34,7 @@ description = Run code formatters and linters.
[testenv:flake8]
skip_install = true
commands =
- flake8 sssom/ tests/ setup.py
+ flake8 sssom/ tests/
deps =
flake8<5.0.0
flake8-black
|
mapping-commons/sssom-py
|
c490d6740dab1f6c9213984bfae98d7306238c0f
|
diff --git a/tests/test_collapse.py b/tests/test_collapse.py
index 9cfe58b..bd05869 100644
--- a/tests/test_collapse.py
+++ b/tests/test_collapse.py
@@ -44,7 +44,7 @@ class TestCollapse(unittest.TestCase):
def test_filter(self):
"""Test the row count after filtering redundant rows."""
df = filter_redundant_rows(self.df)
- self.assertEqual(len(df), 91)
+ self.assertEqual(len(df), 92)
def test_ptable(self):
"""Test the row count of the ptable export."""
|
SSSOM code broken
It's really frustrating to rely on this package. Sometimes it just breaks, and then my builds that rely on it don't work. This makes me want to write my own SSSOM package, which I won't have time to maintain either, but at least I will know how it works...
Can someone help figure out what's going on in https://github.com/biopragmatics/biomappings/actions/runs/4075209444/jobs/7021308103?
|
0.0
|
c490d6740dab1f6c9213984bfae98d7306238c0f
|
[
"tests/test_collapse.py::TestCollapse::test_filter"
] |
[
"tests/test_collapse.py::TestCollapse::test_collapse",
"tests/test_collapse.py::TestCollapse::test_diff",
"tests/test_collapse.py::TestCollapse::test_groupings",
"tests/test_collapse.py::TestCollapse::test_ptable",
"tests/test_collapse.py::TestCollapse::test_row_count"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-02 15:48:18+00:00
|
cc0-1.0
| 3,709 |
|
marcelm__cutadapt-695
|
diff --git a/src/cutadapt/kmer_heuristic.py b/src/cutadapt/kmer_heuristic.py
index ffdf035..0b26d14 100644
--- a/src/cutadapt/kmer_heuristic.py
+++ b/src/cutadapt/kmer_heuristic.py
@@ -1,39 +1,29 @@
import io
-import itertools
-import sys
from typing import List, Optional, Set, Tuple
from collections import defaultdict
-def kmer_possibilities(sequence: str, chunks: int) -> List[Set[str]]:
+def kmer_chunks(sequence: str, chunks: int) -> Set[str]:
"""
- Partition a sequence in almost equal sized chunks. Return all possibilities.
-
- Example sequence ABCDEFGH with 3 chunks. Possibilities:
- ["ABC", "DEF", "GH"]; ["ABC", "DE", "FGH"]; ["AB", "CDE", "FGH"]
+ Partition a sequence in almost equal sized chunks. Returns the shortest
+ possibility. AABCABCABC, 3 returns {"AABC", "ABC"}
"""
chunk_size = len(sequence) // (chunks)
remainder = len(sequence) % (chunks)
chunk_sizes: List[int] = remainder * [chunk_size + 1] + (chunks - remainder) * [
chunk_size
]
- possible_orderings = set(itertools.permutations(chunk_sizes))
- kmer_sets = []
- for chunk_list in possible_orderings:
- offset = 0
- chunk_set = set()
- for size in chunk_list:
- chunk_set.add(sequence[offset : offset + size])
- offset += size
- kmer_sets.append(chunk_set)
- return kmer_sets
+ offset = 0
+ chunk_set = set()
+ for size in chunk_sizes:
+ chunk_set.add(sequence[offset : offset + size])
+ offset += size
+ return chunk_set
-# A SearchSet is a start and stop combined with a list of possible kmer sets
-# which should appear between this start and stop. Start and stop follow python
-# indexing rules. (Negative start is a position relative to the end. None end
-# is to the end of the sequence)
-SearchSet = Tuple[int, Optional[int], List[Set[str]]]
+# A SearchSet is a start and stop combined with a set of strings to search
+# for at that position
+SearchSet = Tuple[int, Optional[int], Set[str]]
def minimize_kmer_search_list(
@@ -74,24 +64,22 @@ def minimize_kmer_search_list(
return kmers_and_positions
-def find_optimal_kmers(
+def remove_redundant_kmers(
search_sets: List[SearchSet],
) -> List[Tuple[int, Optional[int], List[str]]]:
- minimal_score = sys.maxsize
- best_combination = []
- positions = [(start, stop) for start, stop, kmer_set_list in search_sets]
- kmer_set_lists = [kmer_set_list for start, stop, kmer_set_list in search_sets]
- for kmer_sets in itertools.product(*kmer_set_lists):
- kmer_search_list = []
- for kmer_set, (start, stop) in zip(kmer_sets, positions):
- for kmer in kmer_set:
- kmer_search_list.append((kmer, start, stop))
- minimized_search_list = minimize_kmer_search_list(kmer_search_list)
- if len(minimized_search_list) < minimal_score:
- best_combination = minimized_search_list
- minimal_score = len(minimized_search_list)
+ """
+ This removes kmers that are searched in multiple search sets and makes
+ sure they are only searched in the larger search set. This reduces the
+ amount of searched patterns and therefore the number of false positives.
+ """
+
+ kmer_search_list = []
+ for start, stop, kmer_set in search_sets:
+ for kmer in kmer_set:
+ kmer_search_list.append((kmer, start, stop))
+ minimized_search_list = minimize_kmer_search_list(kmer_search_list)
result_dict = defaultdict(list)
- for kmer, start, stop in best_combination:
+ for kmer, start, stop in minimized_search_list:
result_dict[(start, stop)].append(kmer)
return [(start, stop, kmers) for (start, stop), kmers in result_dict.items()]
@@ -120,10 +108,10 @@ def create_back_overlap_searchsets(
min_overlap_kmer_length = 5
if minimum_length < min_overlap_kmer_length:
for i in range(minimum_length, min_overlap_kmer_length):
- search_set = (-i, None, [{adapter[:i]}])
+ search_set = (-i, None, {adapter[:i]})
search_sets.append(search_set)
minimum_length = min_overlap_kmer_length
- kmer_sets = kmer_possibilities(adapter[:minimum_length], max_errors + 1)
+ kmer_sets = kmer_chunks(adapter[:minimum_length], max_errors + 1)
search_sets.append((-length, None, kmer_sets))
minimum_length = length + 1
return search_sets
@@ -166,16 +154,14 @@ def create_positions_and_kmers(
adapter[::-1], min_overlap, error_rate
)
front_search_sets = []
- for start, stop, kmer_sets in reversed_back_search_sets:
- new_kmer_sets = [
- {kmer[::-1] for kmer in kmer_set} for kmer_set in kmer_sets
- ]
- front_search_sets.append((0, -start, new_kmer_sets))
+ for start, stop, kmer_set in reversed_back_search_sets:
+ new_kmer_set = {kmer[::-1] for kmer in kmer_set}
+ front_search_sets.append((0, -start, new_kmer_set))
search_sets.extend(front_search_sets)
if internal:
- kmer_sets = kmer_possibilities(adapter, max_errors + 1)
+ kmer_sets = kmer_chunks(adapter, max_errors + 1)
search_sets.append((0, None, kmer_sets))
- return find_optimal_kmers(search_sets)
+ return remove_redundant_kmers(search_sets)
def kmer_probability_analysis(
@@ -215,3 +201,31 @@ def kmer_probability_analysis(
f"Chance for profile hit by random sequence: {(1 - accumulated_not_hit_chance) * 100:.2f}%\n"
)
return out.getvalue()
+
+
+if __name__ == "__main__":
+ # This allows for easy debugging and benchmarking of the kmer heuristic code.
+ import argparse
+ from ._kmer_finder import KmerFinder
+ import dnaio
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--adapter")
+ parser.add_argument("--anywhere", action="store_true")
+ parser.add_argument("fastq")
+ args = parser.parse_args()
+ kmers_and_offsets = create_positions_and_kmers(
+ args.adapter, 3, 0.1, back_adapter=True, front_adapter=args.anywhere
+ )
+ kmer_finder = KmerFinder(kmers_and_offsets)
+ print(kmer_probability_analysis(kmers_and_offsets))
+ with dnaio.open(args.fastq, mode="r", open_threads=0) as reader: # type: ignore
+ number_of_records = 0
+ possible_adapters_found = 0
+ for number_of_records, record in enumerate(reader, start=1):
+ if kmer_finder.kmers_present(record.sequence):
+ possible_adapters_found += 1
+ print(
+ f"Percentage possible adapters: "
+ f"{possible_adapters_found * 100 / number_of_records:.2f}%"
+ )
|
marcelm/cutadapt
|
1551fbd4ebf49ececdca3aa6617734c448a17daa
|
diff --git a/tests/test_kmer_heuristic.py b/tests/test_kmer_heuristic.py
index 1110665..c3a69a9 100644
--- a/tests/test_kmer_heuristic.py
+++ b/tests/test_kmer_heuristic.py
@@ -1,7 +1,7 @@
import pytest
from cutadapt.kmer_heuristic import (
- kmer_possibilities,
+ kmer_chunks,
minimize_kmer_search_list,
create_back_overlap_searchsets,
create_positions_and_kmers,
@@ -11,15 +11,12 @@ from cutadapt.kmer_heuristic import (
@pytest.mark.parametrize(
["sequence", "chunks", "expected"],
[
- ("ABC", 3, [{"A", "B", "C"}]),
- ("ABCD", 3, [{"A", "B", "CD"}, {"A", "BC", "D"}, {"AB", "C", "D"}]),
+ ("ABC", 3, {"A", "B", "C"}),
+ ("ABCD", 3, {"AB", "C", "D"}),
],
)
-def test_kmer_possibilities(sequence, chunks, expected):
- frozen_expected = set(frozenset(s) for s in expected)
- result = kmer_possibilities(sequence, chunks)
- frozen_result = set(frozenset(s) for s in result)
- assert frozen_expected == frozen_result
+def test_kmer_chunks(sequence, chunks, expected):
+ assert kmer_chunks(sequence, chunks) == expected
@pytest.mark.parametrize(
@@ -45,11 +42,11 @@ def test_create_back_overlap_searchsets():
adapter = "ABCDEFGHIJ0123456789"
searchsets = create_back_overlap_searchsets(adapter, 3, 0.1)
assert len(searchsets) == 5
- assert (-3, None, [{"ABC"}]) in searchsets
- assert (-4, None, [{"ABCD"}]) in searchsets
- assert (-9, None, [{"ABCDE"}]) in searchsets
- assert (-19, None, kmer_possibilities(adapter[:10], 2)) in searchsets
- assert (-20, None, kmer_possibilities(adapter, 3)) in searchsets
+ assert (-3, None, {"ABC"}) in searchsets
+ assert (-4, None, {"ABCD"}) in searchsets
+ assert (-9, None, {"ABCDE"}) in searchsets
+ assert (-19, None, kmer_chunks(adapter[:10], 2)) in searchsets
+ assert (-20, None, kmer_chunks(adapter, 3)) in searchsets
@pytest.mark.parametrize(
@@ -106,3 +103,17 @@ def test_create_kmers_and_positions(kwargs, expected):
assert {(start, stop): frozenset(kmers) for start, stop, kmers in result} == {
(start, stop): frozenset(kmers) for start, stop, kmers in expected
}
+
+
[email protected](0.5)
+def test_create_positions_and_kmers_slow():
+ create_positions_and_kmers(
+ # Ridiculous size to check if there aren't any quadratic or exponential
+ # algorithms in the code.
+ "A" * 1000,
+ min_overlap=3,
+ error_rate=0.1,
+ back_adapter=True,
+ front_adapter=False,
+ internal=True,
+ )
|
Cutadapt 4.3 runs excruciatingly long
### Problem description
Our CI pipeline runtime recently (and suddenly) increased from ~3 min to more than 4 h. We were able to identify an internal Cutadapt call as the culprit, with each individual call taking more than 1 h. We did not observe a functional deterioration, but I am not 100% sure that our CI pipeline would necessarily pick that up.
The problem occurs **only for the most recent version (`4.3`)**, not with versions `4.1` and `4.2`. Indeed, capping the range of the supported Cutadapt version to `<=4.2` restored our CI runtime back to the typical ~3 min.
### Steps to reproduce
Here is the offending call:
```bash
cutadapt -a A{100} -o out.fastq FILE
```
where `FILE` is, for example, this tiny [test file](https://raw.githubusercontent.com/zavolanlab/htsinfer/dev/tests/files/adapter_single.fastq).
#### Software information
* Python version: 3.10.0; `conda-forge` build `h12debd9_5`
* Cutadapt version: 4.3; `bioconda` build `py310h1425a21_0`
* Installed via: Conda/Mamba
### Additional info
In our CI pipeline, the problem occurs for Python versions 3.7, 3.8 and 3.9 as well. Installation via Pip was not tested. When recreating the issue locally, one core ran at 100% utilization with very little memory consumption. I did not wait for the command to finish (my laptop got hot), so I could not check whether the output differs from that of older versions (apologies!).
Looking at the changes introduced in `4.3`, my best bet would be on https://github.com/marcelm/cutadapt/pull/663 possibly being the cause of this issue.
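
For context on the fix: the patch above replaces the combinatorial search over chunk permutations and their cross products (which blows up for long, repetitive adapters like `A{100}`) with a single deterministic partition. The pigeonhole guarantee still holds: split the adapter into `max_errors + 1` chunks, and any occurrence with at most `max_errors` errors must contain at least one chunk exactly. A self-contained copy of the patched function:

```python
# Pigeonhole chunking as in the patched kmer_chunks(): one deterministic
# partition into almost equal sized chunks, no permutations or products.
def kmer_chunks(sequence, chunks):
    chunk_size = len(sequence) // chunks
    remainder = len(sequence) % chunks
    sizes = remainder * [chunk_size + 1] + (chunks - remainder) * [chunk_size]
    offset = 0
    chunk_set = set()
    for size in sizes:
        chunk_set.add(sequence[offset:offset + size])
        offset += size
    return chunk_set


assert kmer_chunks("AABCABCABC", 3) == {"AABC", "ABC"}
```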
|
0.0
|
1551fbd4ebf49ececdca3aa6617734c448a17daa
|
[
"tests/test_kmer_heuristic.py::test_kmer_chunks[ABC-3-expected0]",
"tests/test_kmer_heuristic.py::test_kmer_chunks[ABCD-3-expected1]",
"tests/test_kmer_heuristic.py::test_minimize_kmer_search_list[kmer_search_list0-expected0]",
"tests/test_kmer_heuristic.py::test_minimize_kmer_search_list[kmer_search_list1-expected1]",
"tests/test_kmer_heuristic.py::test_minimize_kmer_search_list[kmer_search_list2-expected2]",
"tests/test_kmer_heuristic.py::test_minimize_kmer_search_list[kmer_search_list3-expected3]",
"tests/test_kmer_heuristic.py::test_minimize_kmer_search_list[kmer_search_list4-expected4]",
"tests/test_kmer_heuristic.py::test_minimize_kmer_search_list[kmer_search_list5-expected5]",
"tests/test_kmer_heuristic.py::test_create_back_overlap_searchsets",
"tests/test_kmer_heuristic.py::test_create_kmers_and_positions[kwargs0-expected0]",
"tests/test_kmer_heuristic.py::test_create_kmers_and_positions[kwargs1-expected1]",
"tests/test_kmer_heuristic.py::test_create_kmers_and_positions[kwargs2-expected2]",
"tests/test_kmer_heuristic.py::test_create_kmers_and_positions[kwargs3-expected3]",
"tests/test_kmer_heuristic.py::test_create_kmers_and_positions[kwargs4-expected4]",
"tests/test_kmer_heuristic.py::test_create_positions_and_kmers_slow"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-12 10:04:26+00:00
|
mit
| 3,710 |
|
marcelm__cutadapt-750
|
diff --git a/src/cutadapt/adapters.py b/src/cutadapt/adapters.py
index 849485e..6c8bd0a 100644
--- a/src/cutadapt/adapters.py
+++ b/src/cutadapt/adapters.py
@@ -610,7 +610,7 @@ class SingleAdapter(Adapter, ABC):
back_adapter: bool,
front_adapter: bool,
internal: bool = True,
- ) -> KmerFinder:
+ ) -> Union[KmerFinder, MockKmerFinder]:
positions_and_kmers = create_positions_and_kmers(
sequence,
self.min_overlap,
@@ -621,9 +621,13 @@ class SingleAdapter(Adapter, ABC):
)
if self._debug:
print(kmer_probability_analysis(positions_and_kmers))
- return KmerFinder(
- positions_and_kmers, self.adapter_wildcards, self.read_wildcards
- )
+ try:
+ return KmerFinder(
+ positions_and_kmers, self.adapter_wildcards, self.read_wildcards
+ )
+ except ValueError:
+ # Kmers too long.
+ return MockKmerFinder()
def __repr__(self):
return (
|
marcelm/cutadapt
|
5f3d9e9d333d814a6b3f17dbcbb3ed0d684126a5
|
diff --git a/tests/test_adapters.py b/tests/test_adapters.py
index e9252e8..ce2b57d 100644
--- a/tests/test_adapters.py
+++ b/tests/test_adapters.py
@@ -639,3 +639,14 @@ def test_noninternal_front_adapter_with_n_wildcards_issue_654():
assert match.astart == 3
assert match.astop == 6
assert match.errors == 1
+
+
+def test_very_long_adapter_issue_749():
+ adapter = BackAdapter("A" * 70, max_errors=0)
+ match = adapter.match_to("GATTAC" + 20 * "A")
+ assert match is not None
+ assert match.rstart == 6
+ assert match.rstop == 26
+ assert match.astart == 0
+ assert match.astop == 20
+ assert match.errors == 0
|
KmerFinder: x of length y is longer than the maximum of 64
I encountered this today in a pipeline I wrote a while ago after updating Cutadapt to the most recent version.
```
$ echo -e '>r\nACGT' | cutadapt --debug -a 'A{70}' -e 0 -
This is cutadapt 4.5.dev50+g0b9c325.d20231106 with Python 3.10.12
Command line parameters: --debug -a A{70} -e 0 -
DEBUG: Python executable: .../cutadapt/.venv/bin/python
DEBUG: dnaio version: 0.10.0
DEBUG: xopen version: 1.7.0
DEBUG: Command line error. Traceback:
Traceback (most recent call last):
File ".../cutadapt/src/cutadapt/cli.py", line 934, in adapters_from_args
adapters = make_adapters_from_specifications(args.adapters, search_parameters)
File ".../cutadapt/src/cutadapt/parser.py", line 386, in make_adapters_from_specifications
adapters.extend(
File ".../cutadapt/src/cutadapt/parser.py", line 425, in make_adapters_from_one_specification
yield make_adapter(spec, adapter_type, search_parameters)
File ".../cutadapt/src/cutadapt/parser.py", line 466, in make_adapter
return _make_not_linked_adapter(spec, name, adapter_type, search_parameters)
File ".../cutadapt/src/cutadapt/parser.py", line 543, in _make_not_linked_adapter
return adapter_class(
File ".../cutadapt/src/cutadapt/adapters.py", line 786, in __init__
super().__init__(*args, **kwargs)
File ".../cutadapt/src/cutadapt/adapters.py", line 590, in __init__
self.kmer_finder = self._kmer_finder()
File ".../cutadapt/src/cutadapt/adapters.py", line 798, in _kmer_finder
return self._make_kmer_finder(
File ".../cutadapt/src/cutadapt/adapters.py", line 624, in _make_kmer_finder
return KmerFinder(
File "src/cutadapt/_kmer_finder.pyx", line 139, in cutadapt._kmer_finder.KmerFinder.__cinit__
raise ValueError(f"{kmer} of length {kmer_length} is longer "
ValueError: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA of length 70 is longer than the maximum of 64.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File ".../cutadapt/src/cutadapt/cli.py", line 1129, in main
adapters, adapters2 = adapters_from_args(args)
File ".../cutadapt/src/cutadapt/cli.py", line 941, in adapters_from_args
raise CommandLineError(e.args[0])
cutadapt.cli.CommandLineError: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA of length 70 is longer than the maximum of 64.
ERROR: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA of length 70 is longer than the maximum of 64.
```
Adapters that are too long for the k-mer finder should skip the k-mer finder optimization.
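A minimal self-contained sketch of that fallback pattern (all names here are illustrative stand-ins rather than cutadapt's actual API; the real `KmerFinder` is a Cython class in `_kmer_finder.pyx`, and the `kmers_present` method name below is hypothetical):
```python
class MockKmerFinder:
    """Illustrative stand-in: claims every adapter may be present, so the
    full alignment is always attempted instead of failing at startup."""

    def kmers_present(self, sequence: str) -> bool:  # hypothetical method name
        return True


class KmerFinder:
    """Toy model of the Cython KmerFinder, which rejects kmers > 64 bp."""

    MAX_KMER_LENGTH = 64

    def __init__(self, kmers):
        for kmer in kmers:
            if len(kmer) > self.MAX_KMER_LENGTH:
                raise ValueError(
                    f"{kmer} of length {len(kmer)} is longer than the maximum of 64."
                )
        self.kmers = kmers

    def kmers_present(self, sequence: str) -> bool:
        return any(kmer in sequence for kmer in self.kmers)


def make_kmer_finder(kmers):
    # Fall back gracefully instead of propagating the ValueError to the CLI.
    try:
        return KmerFinder(kmers)
    except ValueError:
        return MockKmerFinder()


assert isinstance(make_kmer_finder(["A" * 70]), MockKmerFinder)
assert isinstance(make_kmer_finder(["ACGT"]), KmerFinder)
```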
|
0.0
|
5f3d9e9d333d814a6b3f17dbcbb3ed0d684126a5
|
[
"tests/test_adapters.py::test_very_long_adapter_issue_749"
] |
[
"tests/test_adapters.py::test_back_adapter_absolute_number_of_errors",
"tests/test_adapters.py::test_back_adapter_absolute_number_of_errors_with_wildcards",
"tests/test_adapters.py::test_front_adapter_partial_occurrence_in_back",
"tests/test_adapters.py::test_back_adapter_partial_occurrence_in_front",
"tests/test_adapters.py::test_front_adapter_finds_leftmost_match",
"tests/test_adapters.py::test_rightmost_front_adapter",
"tests/test_adapters.py::test_rightmost_front_adapter_partial_occurrence",
"tests/test_adapters.py::test_wildcards",
"tests/test_adapters.py::test_issue_80",
"tests/test_adapters.py::test_back_adapter_indel_and_exact_occurrence",
"tests/test_adapters.py::test_back_adapter_indel_and_mismatch_occurrence",
"tests/test_adapters.py::test_str",
"tests/test_adapters.py::test_prefix_adapter_with_indels_one_mismatch",
"tests/test_adapters.py::test_prefix_adapter_with_indels_two_mismatches",
"tests/test_adapters.py::test_linked_adapter",
"tests/test_adapters.py::test_linked_adapter_statistics",
"tests/test_adapters.py::test_linked_matches_property",
"tests/test_adapters.py::test_info_record",
"tests/test_adapters.py::test_random_match_probabilities",
"tests/test_adapters.py::test_add_adapter_statistics",
"tests/test_adapters.py::test_no_indels_empty_read[PrefixAdapter]",
"tests/test_adapters.py::test_no_indels_empty_read[SuffixAdapter]",
"tests/test_adapters.py::test_prefix_adapter_match_with_n_wildcard_in_read",
"tests/test_adapters.py::test_suffix_adapter_match_with_n_wildcard_in_read",
"tests/test_adapters.py::test_multiple_adapters",
"tests/test_adapters.py::test_indexed_prefix_adapters",
"tests/test_adapters.py::test_indexed_prefix_adapters_incorrect_type",
"tests/test_adapters.py::test_indexed_very_similar",
"tests/test_adapters.py::test_indexed_too_high_k",
"tests/test_adapters.py::test_indexed_suffix_adapters",
"tests/test_adapters.py::test_indexed_suffix_adapters_incorrect_type",
"tests/test_adapters.py::test_indexed_prefix_adapters_with_indels",
"tests/test_adapters.py::test_indexed_prefix_adapters_with_n_wildcard",
"tests/test_adapters.py::test_indexed_prefix_adapters_with_n_collision[ANGCATCATAAAAAAAAAA]",
"tests/test_adapters.py::test_indexed_prefix_adapters_with_n_collision[AAGCATCATAAAAAAAAAA]",
"tests/test_adapters.py::test_inosine_wildcard",
"tests/test_adapters.py::test_noninternal_front_adapter",
"tests/test_adapters.py::test_noninternal_front_adapter_with_n_wildcards[0]",
"tests/test_adapters.py::test_noninternal_front_adapter_with_n_wildcards[1]",
"tests/test_adapters.py::test_noninternal_front_adapter_with_n_wildcards_issue_654"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-12-19 13:54:33+00:00
|
mit
| 3,711 |
|
marcelm__cutadapt-781
|
diff --git a/src/cutadapt/report.py b/src/cutadapt/report.py
index 84ff9e0..2b47d8e 100644
--- a/src/cutadapt/report.py
+++ b/src/cutadapt/report.py
@@ -177,16 +177,28 @@ class Statistics:
if isinstance(modifier, PolyATrimmer):
self.poly_a_trimmed_lengths[i] = modifier.trimmed_bases
elif isinstance(modifier, AdapterCutter):
- assert self.with_adapters[i] is None
- self.with_adapters[i] = modifier.with_adapters
- self.adapter_stats[i] = list(modifier.adapter_statistics.values())
+ if self.with_adapters[i] is None:
+ self.with_adapters[i] = modifier.with_adapters
+ self.adapter_stats[i] = list(modifier.adapter_statistics.values())
+ else:
+ self.with_adapters[i] += modifier.with_adapters # type: ignore
+ self.adapter_stats[i] += list(modifier.adapter_statistics.values())
elif isinstance(modifier, ReverseComplementer):
- assert self.with_adapters[i] is None
- self.with_adapters[i] = modifier.adapter_cutter.with_adapters
- self.adapter_stats[i] = list(
- modifier.adapter_cutter.adapter_statistics.values()
- )
- self.reverse_complemented = modifier.reverse_complemented
+ if self.with_adapters[i] is None:
+ self.with_adapters[i] = modifier.adapter_cutter.with_adapters
+ self.adapter_stats[i] = list(
+ modifier.adapter_cutter.adapter_statistics.values()
+ )
+ self.reverse_complemented = modifier.reverse_complemented
+ else:
+ assert self.with_adapters[i] is not None
+ self.with_adapters[i] += modifier.adapter_cutter.with_adapters # type: ignore
+ self.adapter_stats[i] += list(
+ modifier.adapter_cutter.adapter_statistics.values()
+ )
+ self.reverse_complemented = add_if_not_none(
+ self.reverse_complemented, modifier.reverse_complemented
+ )
def as_json(self, gc_content: float = 0.5, one_line: bool = False) -> Dict:
"""
|
marcelm/cutadapt
|
b3698c259bef4b68caf0c9fb9b3fb38646b82d0c
|
diff --git a/tests/test_api.py b/tests/test_api.py
index 1add55e..2df132b 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -154,3 +154,31 @@ def test_pipeline_paired(tmp_path, cores):
# - too many submodules (flatter namespace)
# - use xopen directly instead of file_opener;
# possibly with myxopen = functools.partial(xopen, ...)
+
+
+def test_two_adapter_cutters_and_reverse_complementer(tmp_path):
+ from cutadapt.pipeline import SingleEndPipeline
+ from cutadapt.files import OutputFiles, InputPaths
+ from cutadapt.modifiers import AdapterCutter, ReverseComplementer
+ from cutadapt.adapters import BackAdapter
+
+ adapter = BackAdapter(sequence="GATCGGAAGA")
+ modifiers = [
+ AdapterCutter([adapter]),
+ AdapterCutter([adapter]),
+ ReverseComplementer(AdapterCutter([adapter])),
+ ]
+ inpaths = InputPaths(datapath("small.fastq"))
+ with make_runner(inpaths, cores=1) as runner:
+ outfiles = OutputFiles(
+ proxied=False,
+ qualities=True,
+ interleaved=False,
+ )
+ steps = [SingleEndSink(outfiles.open_record_writer(tmp_path / "out.fastq"))]
+ pipeline = SingleEndPipeline(modifiers, steps)
+ stats = runner.run(pipeline, DummyProgress(), outfiles)
+ outfiles.close()
+
+ assert stats is not None
+ assert len(stats.as_json()["adapters_read1"]) == 3
|
Piping multiple adapter trimming steps is unsupported when using the Python API.
When using the API code in cutadapt, multiple adapter trimming steps will cause an error.
For example, given an NGS sequencing library with the following scheme:
`(p5 adapter)-(UMI)-(inner adapter)-(insert sequence)-(p7 adapter)`
We need to cut the `p5 adapter` with `-a`, then cut the UMI sequence with `-u`, then cut the inner adapter again with `-a`. Since `-a` is triggered twice, cutadapt will raise an error.
https://github.com/marcelm/cutadapt/blob/33df54550e9d789e03fb9e10a98d18a2a8d71b94/src/cutadapt/report.py#L180
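A minimal sketch of the fix direction taken in the patch above: when a second cutter reports for the same read end, accumulate its counts instead of asserting that none were recorded yet (class and method names simplified for illustration):
```python
from typing import List, Optional


class Statistics:
    def __init__(self) -> None:
        # One slot per read (R1/R2); None until a cutter reports.
        self.with_adapters: List[Optional[int]] = [None, None]
        self.adapter_stats: List[list] = [[], []]

    def add_cutter(self, i: int, with_adapters: int, adapter_statistics: list) -> None:
        if self.with_adapters[i] is None:
            self.with_adapters[i] = with_adapters
            self.adapter_stats[i] = list(adapter_statistics)
        else:
            # A second AdapterCutter on the same read: merge, don't assert.
            self.with_adapters[i] += with_adapters
            self.adapter_stats[i] += list(adapter_statistics)


stats = Statistics()
stats.add_cutter(0, 10, ["stats-a"])
stats.add_cutter(0, 5, ["stats-b"])   # previously tripped `assert ... is None`
assert stats.with_adapters[0] == 15
```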
|
0.0
|
b3698c259bef4b68caf0c9fb9b3fb38646b82d0c
|
[
"tests/test_api.py::test_two_adapter_cutters_and_reverse_complementer"
] |
[
"tests/test_api.py::test_main_without_sys_stdout_buffer_available",
"tests/test_api.py::test_command_line",
"tests/test_api.py::test_pipeline_single[1]",
"tests/test_api.py::test_pipeline_single[2]",
"tests/test_api.py::test_pipeline_paired[1]",
"tests/test_api.py::test_pipeline_paired[2]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-04-23 13:53:38+00:00
|
mit
| 3,712 |
|
marcharper__python-ternary-136
|
diff --git a/ternary/helpers.py b/ternary/helpers.py
index a72cbb9..06c4ae7 100644
--- a/ternary/helpers.py
+++ b/ternary/helpers.py
@@ -103,6 +103,26 @@ def project_point(p, permutation=None):
return numpy.array([x, y])
+def planar_to_coordinates(p, scale):
+    """
+    Maps a planar simplex point (regular x, y) back to ternary (x, y, z)
+    coordinates. The order of the coordinates is counterclockwise from the origin.
+
+    Parameters
+    ----------
+    p: 2-tuple
+        The planar simplex (x, y) point to be transformed to ternary (x, y, z) coordinates
+
+    scale: Int
+        The normalized scale of the simplex, i.e. N such that points (x, y, z)
+        satisfy x + y + z == N
+
+    """
+    # y must be computed first because the expression for x depends on it
+    y = p[1] / SQRT3OVER2
+    x = p[0] - y / 2
+    z = scale - (x + y)
+    return numpy.array([x, y, z])
+
+
def project_sequence(s, permutation=None):
"""
Projects a point or sequence of points using `project_point` to lists xs, ys
diff --git a/ternary/ternary_axes_subplot.py b/ternary/ternary_axes_subplot.py
index 85a8ecf..629a223 100644
--- a/ternary/ternary_axes_subplot.py
+++ b/ternary/ternary_axes_subplot.py
@@ -441,7 +441,7 @@ class TernaryAxesSubplot(object):
cb_kwargs=cb_kwargs)
def heatmapf(self, func, scale=None, cmap=None, boundary=True,
- style='triangular', colorbar=True, scientific=True,
+ style='triangular', colorbar=True, scientific=False,
vmin=None, vmax=None, cbarlabel=None, cb_kwargs=None):
if not scale:
scale = self.get_scale()
|
marcharper/python-ternary
|
62dda5d12e19752437a6c76bbf43bc954742a6b3
|
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index 2fe9640..4353c2b 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -3,7 +3,7 @@ import unittest
from numpy.testing import assert_array_equal
-from ternary.helpers import normalize, project_point, simplex_iterator, SQRT3OVER2
+from ternary.helpers import normalize, project_point, planar_to_coordinates, simplex_iterator, SQRT3OVER2
class FunctionCases(unittest.TestCase):
@@ -84,6 +84,32 @@ class FunctionCases(unittest.TestCase):
expected = (1.5, SQRT3OVER2)
assert_array_equal(projected, expected)
+ def test_planar_to_coordinates(self):
+ projected = (0.0, 0.0)
+ point = planar_to_coordinates(projected, scale = 100)
+ expected = (0.0, 0.0, 100.0)
+ assert_array_equal(point, expected)
+
+ projected = (100.0, 0.0)
+ point = planar_to_coordinates(projected, scale = 100)
+ expected = (100.0, 0.0, 0.0)
+ assert_array_equal(point, expected)
+
+ projected = (40.0,0)
+ point = planar_to_coordinates(projected, scale = 100)
+ expected = (40.0, 0.0 , 60.0)
+ assert_array_equal(point, expected)
+
+ projected = (10.0, SQRT3OVER2)
+ point = planar_to_coordinates(projected, scale = 100)
+ expected = (9.5, 1.0 , 89.5)
+ assert_array_equal(point, expected)
+
+ projected = (10.0, SQRT3OVER2)
+ point = planar_to_coordinates(projected, scale = 100)
+ expected = (9.5, 1.0 , 89.5)
+ assert_array_equal(point, expected)
+
if __name__ == "__main__":
unittest.main()
|
Planar to coordinates
Hello!
How can I convert coordinates from the planar simplex back to ternary (x, y, z) coordinates?
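A minimal sketch of the inverse transform, assuming this library's `project_point` convention (`x = a + b/2`, `y = SQRT3OVER2 * b`):
```python
import numpy

SQRT3OVER2 = 3 ** 0.5 / 2


def planar_to_coordinates(p, scale):
    """Invert the simplex projection: planar (x, y) -> ternary (x, y, z)."""
    y = p[1] / SQRT3OVER2   # recover the second ternary coordinate
    x = p[0] - y / 2        # undo the horizontal shear of the projection
    z = scale - (x + y)     # ternary coordinates must sum to `scale`
    return numpy.array([x, y, z])


# Round trip check: the planar point (10.0, SQRT3OVER2) on a scale-100
# simplex maps back to the ternary point (9.5, 1.0, 89.5).
print(planar_to_coordinates((10.0, SQRT3OVER2), scale=100))
```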
|
0.0
|
62dda5d12e19752437a6c76bbf43bc954742a6b3
|
[
"tests/test_helpers.py::FunctionCases::test_normalize",
"tests/test_helpers.py::FunctionCases::test_project_point",
"tests/test_helpers.py::FunctionCases::test_simplex_iterator",
"tests/test_helpers.py::FunctionCases::test_simplex_iterator_without_boundary"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-06-07 02:52:26+00:00
|
mit
| 3,713 |
|
marcoplaisier__QuotePad-27
|
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..e29ccdf
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,12 @@
+language: python
+matrix:
+ include:
+ - python: 3.6
+ - python: 3.7
+ dist: xenial
+ sudo: true
+install:
+- pip install -r requirements/test.txt
+- pip install -e .
+script:
+- pytest
diff --git a/app.py b/app.py
index 4b059ec..e9302f5 100644
--- a/app.py
+++ b/app.py
@@ -1,11 +1,21 @@
-from flask import Flask
+from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
-def hello_world():
- return 'Hello World!'
+def homepage():
+ return render_template("homepage.html")
+
+
[email protected]('/create')
+def create():
+ return render_template("create.html")
+
+
[email protected]('/index')
+def index():
+ return render_template("index.html")
if __name__ == '__main__':
diff --git a/templates/base.html b/templates/base.html
new file mode 100644
index 0000000..75002ff
--- /dev/null
+++ b/templates/base.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="UTF-8">
+ <title>Setting your texts alight - Quotepad</title>
+ </head>
+
+ <body>
+ {% block content %}{% endblock %}
+ </body>
+</html>
\ No newline at end of file
diff --git a/templates/create.html b/templates/create.html
new file mode 100644
index 0000000..3df7d9b
--- /dev/null
+++ b/templates/create.html
@@ -0,0 +1,5 @@
+{% extends "base.html" %}
+
+{% block content %}
+<p>Create a new template placeholder</p>
+{% endblock %}
\ No newline at end of file
diff --git a/templates/homepage.html b/templates/homepage.html
new file mode 100644
index 0000000..ab84e68
--- /dev/null
+++ b/templates/homepage.html
@@ -0,0 +1,10 @@
+{% extends "base.html" %}
+
+{% block content %}
+<p>Create, update and send your texts to your display</p>
+<ul>
+ <li><a id="new_text" href="{{ url_for("create") }}">New text</a></li>
+ <li><a id="overview" href="{{ url_for("index") }}">Existing texts</a></li>
+</ul>
+<p>Click on the links above to create a new text or see a list of existing texts.</p>
+{% endblock %}
\ No newline at end of file
diff --git a/templates/index.html b/templates/index.html
new file mode 100644
index 0000000..cddbe83
--- /dev/null
+++ b/templates/index.html
@@ -0,0 +1,5 @@
+{% extends "base.html" %}
+
+{% block content %}
+<p>All texts placeholder</p>
+{% endblock %}
\ No newline at end of file
|
marcoplaisier/QuotePad
|
a3246986ea24b2c4db79b74d6330e7bd636a8988
|
diff --git a/requirements/test.txt b/requirements/test.txt
index 8c3c0cc..b8ca54f 100644
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,6 +1,5 @@
-r common.txt
pytest
-behave
bump
twine
setuptools
diff --git a/tests/test_flask.py b/tests/test_flask.py
index bc3a15e..9ade764 100644
--- a/tests/test_flask.py
+++ b/tests/test_flask.py
@@ -10,4 +10,4 @@ def client():
def test_simple(client):
t = client.get('/')
- assert b'Hello' in t.data
+ assert b'Setting your texts alight - Quotepad' in t.data
|
Homepage
Create a basic homepage with:
- button to create new text
- some simple explanation text
- link to another page to view the texts
|
0.0
|
a3246986ea24b2c4db79b74d6330e7bd636a8988
|
[
"tests/test_flask.py::test_simple"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-10 06:12:44+00:00
|
mit
| 3,714 |
|
marcosschroh__dataclasses-avroschema-172
|
diff --git a/dataclasses_avroschema/fields.py b/dataclasses_avroschema/fields.py
index bb973af..8ec1397 100644
--- a/dataclasses_avroschema/fields.py
+++ b/dataclasses_avroschema/fields.py
@@ -685,8 +685,16 @@ class UUIDField(LogicalTypeField):
@dataclasses.dataclass
class RecordField(BaseField):
def get_avro_type(self) -> typing.Union[typing.List, typing.Dict]:
- alias = self.parent.metadata.get_alias(self.name) or self.model_metadata.get_alias(self.name) # type: ignore
- name = alias or self.type.__name__
+ meta = getattr(self.type, "Meta", None)
+ metadata = utils.SchemaMetadata.create(meta)
+
+ alias = self.parent.metadata.get_alias_nested_items(self.name) or metadata.get_alias_nested_items(self.name) # type: ignore # noqa E501
+
+        # The priority for the schema name:
+        # 1. Check if schema_name is present in the Meta class of the model itself
+        # 2. Check if an alias_nested_items entry exists in the parent class or in the model's own Meta class
+        # 3. Use the default class name (self.type.__name__)
+ name = metadata.schema_name or alias or self.type.__name__
if not self.exist_type():
user_defined_type = utils.UserDefinedType(name=name, type=self.type)
@@ -697,9 +705,6 @@ class RecordField(BaseField):
record_type = self.type.avro_schema_to_python(root=self.parent)
record_type["name"] = name
else:
- meta = getattr(self.type, "Meta", None)
- metadata = utils.SchemaMetadata.create(meta)
-
if metadata.namespace is None:
raise NameSpaceRequiredException(field_type=self.type, field_name=self.name)
record_type = f"{metadata.namespace}.{name}"
diff --git a/dataclasses_avroschema/utils.py b/dataclasses_avroschema/utils.py
index 606cb63..0d3722f 100644
--- a/dataclasses_avroschema/utils.py
+++ b/dataclasses_avroschema/utils.py
@@ -90,7 +90,7 @@ class SchemaMetadata:
alias_nested_items=getattr(klass, "alias_nested_items", {}),
)
- def get_alias(self, name: str) -> typing.Optional[str]:
+ def get_alias_nested_items(self, name: str) -> typing.Optional[str]:
return self.alias_nested_items.get(name)
|
marcosschroh/dataclasses-avroschema
|
d88afb8df16039cf86fc0598e2d4c0431613987a
|
diff --git a/tests/schemas/test_schema.py b/tests/schemas/test_schema.py
index b9252ee..d1f6c20 100644
--- a/tests/schemas/test_schema.py
+++ b/tests/schemas/test_schema.py
@@ -151,6 +151,24 @@ def test_get_fields():
assert Parent.fake()
+def test_schema_name_from_relationship():
+ @dataclass
+ class MyClass(AvroModel):
+ field_1: str
+
+ class Meta:
+ schema_name = "custom_class"
+
+ class MySecondClass(AvroModel):
+ field_2: MyClass
+
+ class Meta:
+ schema_name = "custom_name"
+
+ schema = MySecondClass.avro_schema_to_python()
+ assert schema["fields"][0]["type"]["name"] == "custom_class"
+
+
def test_validate():
@dataclass
class User(AvroModel):
|
Nested metadata not respected
**Describe the bug**
If I override a `schema_name` attribute for a class that's used as a field, that schema_name isn't respected.
**To Reproduce**
```python
from dataclasses_avroschema import AvroModel
from dataclasses import dataclass
@dataclass
class MyClass(AvroModel):
field_1: str
class Meta:
schema_name = "custom_class" # <-- this is not respected
class MySecondClass(AvroModel):
field_2: MyClass
class Meta:
schema_name = "custom_name"
MySecondClass.avro_schema_to_python()
```
This outputs
```python
{'type': 'record',
'name': 'custom_name',
'fields': [{'name': 'field_2',
'type': {'type': 'record',
'name': 'MyClass', # <-- this line is wrong
'fields': [{'name': 'field_1', 'type': 'string'}],}}],
}
```
**Expected behavior**
I would expect
```python
{'type': 'record',
'name': 'custom_name',
'fields': [{'name': 'field_2',
'type': {'type': 'record',
'name': 'custom_class', # This is the important line
'fields': [{'name': 'field_1', 'type': 'string'}],}}],
}
```
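A minimal sketch of the name-resolution priority a fix would need (an illustrative helper, not the library's API): the nested model's `Meta.schema_name` wins, then any `alias_nested_items` entry, then the class name.
```python
def resolve_record_name(field_name, nested_cls, parent_aliases):
    meta = getattr(nested_cls, "Meta", None)
    schema_name = getattr(meta, "schema_name", None)
    alias = parent_aliases.get(field_name) or getattr(
        meta, "alias_nested_items", {}
    ).get(field_name)
    # Priority: explicit schema_name, then alias_nested_items, then class name.
    return schema_name or alias or nested_cls.__name__


class MyClass:
    class Meta:
        schema_name = "custom_class"


assert resolve_record_name("field_2", MyClass, parent_aliases={}) == "custom_class"
```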
|
0.0
|
d88afb8df16039cf86fc0598e2d4c0431613987a
|
[
"tests/schemas/test_schema.py::test_schema_name_from_relationship"
] |
[
"tests/schemas/test_schema.py::test_total_schema_fields_from_class",
"tests/schemas/test_schema.py::test_total_schema_fields_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_field_metadata",
"tests/schemas/test_schema.py::test_schema_render_from_class",
"tests/schemas/test_schema.py::test_schema_render_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_doc",
"tests/schemas/test_schema.py::test_schema_render_from_instance_with_doc",
"tests/schemas/test_schema.py::test_schema_documentation",
"tests/schemas/test_schema.py::test_schema_cached",
"tests/schemas/test_schema.py::test_extra_avro_attributes",
"tests/schemas/test_schema.py::test_class_empty_metaclass",
"tests/schemas/test_schema.py::test_invalid_schema_type",
"tests/schemas/test_schema.py::test_not_implementd_methods",
"tests/schemas/test_schema.py::test_namespace_required",
"tests/schemas/test_schema.py::test_inherit_dataclass_missing_docs",
"tests/schemas/test_schema.py::test_get_fields",
"tests/schemas/test_schema.py::test_validate",
"tests/schemas/test_schema.py::test_get_enum_type_map",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_unions",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_sub_record",
"tests/schemas/test_schema.py::test_deserialize_complex_types",
"tests/schemas/test_schema.py::test_deserialize_complex_types_invalid_enum_instance",
"tests/schemas/test_schema.py::test_parse_obj"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-04-15 14:27:12+00:00
|
mit
| 3,715 |
|
marcosschroh__dataclasses-avroschema-236
|
diff --git a/dataclasses_avroschema/model_generator/generator.py b/dataclasses_avroschema/model_generator/generator.py
index 3c9326c..8a5b4ee 100644
--- a/dataclasses_avroschema/model_generator/generator.py
+++ b/dataclasses_avroschema/model_generator/generator.py
@@ -253,8 +253,9 @@ class ModelGenerator:
return field_repr
return avro_to_python_utils.AVRO_TYPE_TO_PYTHON[logical_type]
- @staticmethod
- def _parse_decimal(*, field: JsonDict) -> str:
+ def _parse_decimal(self, *, field: JsonDict) -> str:
+ self.imports.add("from dataclasses_avroschema import types")
+
schema: JsonDict = field["type"]
precision = schema["precision"]
scale = schema["scale"]
@@ -295,11 +296,6 @@ class ModelGenerator:
language_types = self.parse_union(field_types=field_types[1:], model_name=model_name)
elif isinstance(field_types[1], dict):
# TODO: check what happens with more than 2 complex types
- # it is a complex type like array, dict, enum, fixed or record
- # it needs to be render
- # if self._is_logical_type(field_type=field_types[1]):
- # language_types = self.parse_logical_type(field=field_types[1])
- # else:
language_types = self.render_field(field=field_types[1], model_name=model_name)
else:
language_types = ", ".join(
|
marcosschroh/dataclasses-avroschema
|
c5a9f9b6bd3fba8301a57787605b84ef06724c63
|
diff --git a/tests/model_generator/conftest.py b/tests/model_generator/conftest.py
index a9b7af6..f3f75cc 100644
--- a/tests/model_generator/conftest.py
+++ b/tests/model_generator/conftest.py
@@ -313,6 +313,15 @@ def schema_one_to_self_relationship() -> JsonDict:
}
[email protected]
+def schema_with_decimal_field() -> JsonDict:
+ return {
+ "type": "record",
+ "name": "demo",
+ "fields": [{"name": "foo", "type": {"type": "bytes", "logicalType": "decimal", "precision": 10, "scale": 3}}],
+ }
+
+
@pytest.fixture
def schema_with_logical_types() -> JsonDict:
return {
diff --git a/tests/model_generator/test_model_generator.py b/tests/model_generator/test_model_generator.py
index 36b46e7..4864b5a 100644
--- a/tests/model_generator/test_model_generator.py
+++ b/tests/model_generator/test_model_generator.py
@@ -1,5 +1,4 @@
-from dataclasses_avroschema import ModelGenerator, types, field_utils
-
+from dataclasses_avroschema import ModelGenerator, field_utils, types
from dataclasses_avroschema.model_generator.avro_to_python_utils import render_datetime
@@ -282,6 +281,23 @@ class User(AvroModel):
assert result.strip() == expected_result.strip()
+def test_decimal_field(schema_with_decimal_field: types.JsonDict) -> None:
+ expected_result = """
+from dataclasses_avroschema import AvroModel
+from dataclasses_avroschema import types
+import dataclasses
+import decimal
+
+
[email protected]
+class Demo(AvroModel):
+ foo: decimal.Decimal = types.Decimal(scale=3, precision=10)
+"""
+ model_generator = ModelGenerator()
+ result = model_generator.render(schema=schema_with_decimal_field)
+ assert result.strip() == expected_result.strip()
+
+
def test_schema_logical_types(schema_with_logical_types: types.JsonDict) -> None:
release_datetime = render_datetime(value=1570903062000, format=field_utils.TIMESTAMP_MILLIS)
release_datetime_micro = render_datetime(value=1570903062000000, format=field_utils.TIMESTAMP_MICROS)
|
ModelGenerator's output raises NameError for decimal fields
**Describe the bug**
Code generated by the `ModelGenerator` for schemas containing a decimal logical type field cannot run due to a missing import.
**To Reproduce**
```py
import dataclasses_avroschema
schema = {
"type": "record",
"name": "demo",
"fields": [
{
"name": "foo",
"type": {
"type": "bytes",
"logicalType": "decimal",
"precision": 10,
"scale": 3
}
}
]
}
code = dataclasses_avroschema.ModelGenerator().render(schema=schema)
with open("/tmp/generated_code.py", "w") as f:
print(code, file=f)
```
```sh
python3 /tmp/generated_code.py
```
```
Traceback (most recent call last):
File "/tmp/generated_code.py", line 7, in <module>
class Demo(AvroModel):
File "/tmp/generated_code.py", line 8, in Demo
foo: decimal.Decimal = types.Decimal(scale=3, precision=10)
^^^^^
NameError: name 'types' is not defined. Did you mean: 'type'?
```
**Expected behavior**
Loadable code without the `NameError`.
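A minimal sketch of the shape of the fix (simplified from the patch: register the `types` import whenever a decimal field is rendered, so the emitted module can reference `types.Decimal`):
```python
class ModelGenerator:
    def __init__(self):
        self.imports = set()

    def _parse_decimal(self, field: dict) -> str:
        # Registering the import here guarantees it is emitted whenever
        # a types.Decimal default appears in the generated model.
        self.imports.add("from dataclasses_avroschema import types")
        schema = field["type"]
        return f"types.Decimal(scale={schema['scale']}, precision={schema['precision']})"


gen = ModelGenerator()
print(gen._parse_decimal({"type": {"type": "bytes", "logicalType": "decimal",
                                   "precision": 10, "scale": 3}}))
print(gen.imports)
```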
|
0.0
|
c5a9f9b6bd3fba8301a57787605b84ef06724c63
|
[
"tests/model_generator/test_model_generator.py::test_decimal_field"
] |
[
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types",
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types_with_default_null",
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types_with_unions",
"tests/model_generator/test_model_generator.py::test_model_generator_array_type",
"tests/model_generator/test_model_generator.py::test_model_generator_map_type",
"tests/model_generator/test_model_generator.py::test_schema_with_fixed_types",
"tests/model_generator/test_model_generator.py::test_schema_with_enum_types",
"tests/model_generator/test_model_generator.py::test_schema_one_to_one_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_many_array_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_many_map_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_self_relathionship",
"tests/model_generator/test_model_generator.py::test_schema_logical_types",
"tests/model_generator/test_model_generator.py::test_model_generator_render_module_from_multiple_schemas"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-01-19 07:56:33+00:00
|
mit
| 3,716 |
|
marcosschroh__dataclasses-avroschema-243
|
diff --git a/dataclasses_avroschema/fields.py b/dataclasses_avroschema/fields.py
index 4e2aac5..c263d29 100644
--- a/dataclasses_avroschema/fields.py
+++ b/dataclasses_avroschema/fields.py
@@ -38,7 +38,6 @@ p = inflect.engine()
@dataclasses.dataclass # type: ignore
class BaseField:
-
__slots__ = (
"name",
"type",
@@ -489,7 +488,6 @@ class EnumField(BaseField):
return f"{namespace}.{name}"
def get_default_value(self) -> typing.Union[str, dataclasses._MISSING_TYPE, None]:
-
if self.default == types.MissingSentinel:
return dataclasses.MISSING
elif self.default in (dataclasses.MISSING, None):
@@ -786,7 +784,6 @@ class RecordField(BaseField):
@dataclasses.dataclass
class DecimalField(BaseField):
-
precision: int = -1
scale: int = 0
@@ -796,8 +793,8 @@ class DecimalField(BaseField):
def set_precision_scale(self) -> None:
if self.default != types.MissingSentinel:
if isinstance(self.default, decimal.Decimal):
- sign, digits, scale = self.default.as_tuple()
- self.scale = scale * -1 # Make scale positive, as that's what Avro expects
+ _, digits, scale = self.default.as_tuple()
+ self.scale = int(scale) * -1 # Make scale positive, as that's what Avro expects
# decimal.Context has a precision property
# BUT the precision property is independent of the number of digits stored in the Decimal instance
# # # FROM THE DOCS HERE https://docs.python.org/3/library/decimal.html
diff --git a/dataclasses_avroschema/schema_definition.py b/dataclasses_avroschema/schema_definition.py
index da419e4..103cf65 100644
--- a/dataclasses_avroschema/schema_definition.py
+++ b/dataclasses_avroschema/schema_definition.py
@@ -49,7 +49,6 @@ class BaseSchemaDefinition(abc.ABC):
@dataclasses.dataclass
class AvroSchemaDefinition(BaseSchemaDefinition):
-
fields: typing.List[FieldType] = dataclasses.field(default_factory=list)
def __post_init__(self) -> None:
diff --git a/dataclasses_avroschema/schema_generator.py b/dataclasses_avroschema/schema_generator.py
index 8ede7d7..36a746c 100644
--- a/dataclasses_avroschema/schema_generator.py
+++ b/dataclasses_avroschema/schema_generator.py
@@ -44,7 +44,7 @@ class AvroModel:
@classmethod
def generate_schema(cls: Type[CT], schema_type: str = "avro") -> Optional[OrderedDict]:
- if cls.schema_def is None:
+ if cls.schema_def is None or cls.__mro__[1] != AvroModel:
# Generate dataclass and metadata
cls.klass = cls.generate_dataclass()
@@ -170,7 +170,6 @@ class AvroModel:
create_instance: bool = True,
writer_schema: Optional[Union[JsonDict, Type[CT]]] = None,
) -> Union[JsonDict, CT]:
-
if inspect.isclass(writer_schema) and issubclass(writer_schema, AvroModel):
# mypy does not undersdtand redefinitions
writer_schema: JsonDict = writer_schema.avro_schema_to_python() # type: ignore
diff --git a/dataclasses_avroschema/serialization.py b/dataclasses_avroschema/serialization.py
index a5c3c81..ae32030 100644
--- a/dataclasses_avroschema/serialization.py
+++ b/dataclasses_avroschema/serialization.py
@@ -106,7 +106,7 @@ def prepare_bytes_decimal(data: decimal.Decimal, precision: int, scale: int = 0)
if len(digits) > precision:
raise ValueError("The decimal precision is bigger than allowed by schema")
- delta = exp + scale
+ delta = int(exp) + scale
if delta < 0:
raise ValueError("Scale provided in schema does not match the decimal")
|
marcosschroh/dataclasses-avroschema
|
2a100666c93afb3f4916ea84b2ed9904b71a3632
|
diff --git a/tests/schemas/test_schema.py b/tests/schemas/test_schema.py
index 05f63be..bde9754 100644
--- a/tests/schemas/test_schema.py
+++ b/tests/schemas/test_schema.py
@@ -303,7 +303,7 @@ def test_parse_obj():
assert User.avro_schema()
-def test_inheritance(user_avro_json: JsonDict) -> None:
+def test_avro_schema_to_python_method_with_inheritance(user_avro_json: JsonDict) -> None:
@dataclass
class Parent(AvroModel):
name: str
@@ -325,3 +325,34 @@ def test_inheritance(user_avro_json: JsonDict) -> None:
child_2_schema = Child2.avro_schema_to_python()
assert child_schema["fields"] == child_2_schema["fields"] == user_avro_json["fields"]
+
+
+def test_avro_schema_method_with_inheritance() -> None:
+ @dataclass
+ class Common(AvroModel):
+ some_data: str
+
+ @dataclass
+ class DerivedA(Common):
+ some_more_data_A: str
+
+ @dataclass
+ class DerivedB(Common):
+ some_more_data_B: str
+
+ common_schema = Common.avro_schema()
+ derived_a_schema = DerivedA.avro_schema()
+ derived_b_schema = DerivedB.avro_schema()
+
+ assert (
+ common_schema
+ == '{"type": "record", "name": "Common", "fields": [{"name": "some_data", "type": "string"}], "doc": "Common(some_data: str)"}'
+ )
+ assert (
+ derived_a_schema
+ == '{"type": "record", "name": "DerivedA", "fields": [{"name": "some_data", "type": "string"}, {"name": "some_more_data_A", "type": "string"}], "doc": "DerivedA(some_data: str, some_more_data_A: str)"}'
+ )
+ assert (
+ derived_b_schema
+ == '{"type": "record", "name": "DerivedB", "fields": [{"name": "some_data", "type": "string"}, {"name": "some_more_data_B", "type": "string"}], "doc": "DerivedB(some_data: str, some_more_data_B: str)"}'
+ )
|
Problem with persistent schema and model inheritance
Thanks for this great package! I use it a lot for registering Faust Records in a schema registry. A problem that I noticed is that the Avro schemas are not correct for complex models that inherit from other models.
**Describe the bug**
When a model is subclassed from another model, the generated schema persisted on the parent model is reused for the subclass. This can lead to some strange behavior:
**To Reproduce**
```python
@dataclasses.dataclass
class Common(AvroModel):
some_data: str
@dataclasses.dataclass
class DerivedA(Common):
some_more_data_A: str
@dataclasses.dataclass
class DerivedB(Common):
some_more_data_B: str
print(Common.avro_schema())
{"type": "record", "name": "Common", "fields": [{"name": "some_data", "type": "string"}], "doc": "Common(some_data: str)"}
print(DerivedA.avro_schema())
{"type": "record", "name": "Common", "fields": [{"name": "some_data", "type": "string"}], "doc": "Common(some_data: str)"}
print(DerivedB.avro_schema())
{"type": "record", "name": "Common", "fields": [{"name": "some_data", "type": "string"}], "doc": "Common(some_data: str)"}
```
It is different if the schema for Common is not generated first:
```python
#print(Common.avro_schema())
print(DerivedA.avro_schema())
{"type": "record", "name": "DerivedA", "fields": [{"name": "some_data", "type": "string"}, {"name": "some_more_data_A", "type": "string"}], "doc": "DerivedA(some_data: str, some_more_data_A: str)"}
print(DerivedB.avro_schema())
{"type": "record", "name": "DerivedB", "fields": [{"name": "some_data", "type": "string"}, {"name": "some_more_data_B", "type": "string"}], "doc": "DerivedB(some_data: str, some_more_data_B: str)"}
```
This is the expected result.
**Expected behavior**
See above. I believe the problem occurs due to this line in ``schema_generator.py``:
```python
@classmethod
def generate_schema(cls: Type[CT], schema_type: str = "avro") -> Optional[OrderedDict]:
if cls.schema_def is None: # <-- schema is persisted
```
I don't know what the Pythonic way to approach this would be. A quick workaround is simply to set the schema of the base class to None before generating the dependent schemas:
```python
print(Common.avro_schema())
{"type": "record", "name": "Common", "fields": [{"name": "some_data", "type": "string"}], "doc": "Common(some_data: str)"}
Common.schema_def=None
print(DerivedA.avro_schema())
{"type": "record", "name": "DerivedA", "fields": [{"name": "some_data", "type": "string"}, {"name": "some_more_data_A", "type": "string"}], "doc": "DerivedA(some_data: str, some_more_data_A: str)"}
Common.schema_def=None
print(DerivedB.avro_schema())
{"type": "record", "name": "DerivedB", "fields": [{"name": "some_data", "type": "string"}, {"name": "some_more_data_B", "type": "string"}], "doc": "DerivedB(some_data: str, some_more_data_B: str)"}
```
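A minimal sketch of the caching issue and of the fix direction eventually merged (regenerate whenever the class does not directly subclass `AvroModel`, since subclasses otherwise inherit the parent's cached `schema_def` as a class attribute); the schema body is faked here for brevity:
```python
class AvroModel:
    schema_def = None  # class attribute: inherited by every subclass!

    @classmethod
    def generate_schema(cls):
        # A subclass sees the parent's cached schema_def through inheritance,
        # so the cache must be bypassed unless cls derives from AvroModel
        # directly (illustrative simplification of the real check).
        if cls.schema_def is None or cls.__mro__[1] is not AvroModel:
            cls.schema_def = f"schema for {cls.__name__}"
        return cls.schema_def


class Common(AvroModel):
    pass


class DerivedA(Common):
    pass


Common.generate_schema()
assert DerivedA.generate_schema() == "schema for DerivedA"
```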
|
0.0
|
2a100666c93afb3f4916ea84b2ed9904b71a3632
|
[
"tests/schemas/test_schema.py::test_avro_schema_method_with_inheritance"
] |
[
"tests/schemas/test_schema.py::test_total_schema_fields_from_class",
"tests/schemas/test_schema.py::test_total_schema_fields_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_field_metadata",
"tests/schemas/test_schema.py::test_schema_render_from_class",
"tests/schemas/test_schema.py::test_schema_render_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_doc",
"tests/schemas/test_schema.py::test_schema_render_from_instance_with_doc",
"tests/schemas/test_schema.py::test_schema_doc_from_meta",
"tests/schemas/test_schema.py::test_schema_cached",
"tests/schemas/test_schema.py::test_extra_avro_attributes",
"tests/schemas/test_schema.py::test_class_empty_metaclass",
"tests/schemas/test_schema.py::test_invalid_schema_type",
"tests/schemas/test_schema.py::test_not_implementd_methods",
"tests/schemas/test_schema.py::test_namespace_required",
"tests/schemas/test_schema.py::test_inherit_dataclass_missing_docs",
"tests/schemas/test_schema.py::test_get_fields",
"tests/schemas/test_schema.py::test_schema_name_from_relationship",
"tests/schemas/test_schema.py::test_alias_from_relationship",
"tests/schemas/test_schema.py::test_validate",
"tests/schemas/test_schema.py::test_get_enum_type_map",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_unions",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_sub_record",
"tests/schemas/test_schema.py::test_deserialize_complex_types",
"tests/schemas/test_schema.py::test_deserialize_complex_types_invalid_enum_instance",
"tests/schemas/test_schema.py::test_parse_obj",
"tests/schemas/test_schema.py::test_avro_schema_to_python_method_with_inheritance"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-14 15:00:46+00:00
|
mit
| 3,717 |
|
marcosschroh__dataclasses-avroschema-244
|
diff --git a/dataclasses_avroschema/fields.py b/dataclasses_avroschema/fields.py
index c263d29..74a22a9 100644
--- a/dataclasses_avroschema/fields.py
+++ b/dataclasses_avroschema/fields.py
@@ -287,23 +287,22 @@ class ListField(ContainerField):
items_type = self.type.__args__[0]
if utils.is_union(items_type):
- self.items_type = UnionField(
+ self.internal_field = UnionField(
self.name,
items_type,
default=self.default,
default_factory=self.default_factory,
model_metadata=self.model_metadata,
parent=self.parent,
- ).get_avro_type()
+ )
else:
self.internal_field = AvroField(
self.name, items_type, model_metadata=self.model_metadata, parent=self.parent
)
- self.items_type = self.internal_field.get_avro_type()
+
+ self.items_type = self.internal_field.get_avro_type()
def fake(self) -> typing.List:
- # return a list of one element with the type specified
- # TODO: check when the internal value is self reference which seems to return `None`
return [self.internal_field.fake()]
|
marcosschroh/dataclasses-avroschema
|
b8ccaaac4c38168644f8e4c60e50f2f97246b9eb
|
diff --git a/tests/fake/test_fake.py b/tests/fake/test_fake.py
index 596adfb..c81f8f1 100644
--- a/tests/fake/test_fake.py
+++ b/tests/fake/test_fake.py
@@ -155,6 +155,7 @@ def test_self_one_to_many_relationship() -> None:
class User(AvroModel):
name: str
age: int
+ points: typing.List[typing.Optional[types.Float32]]
teamates: typing.Optional[typing.List[typing.Type["User"]]] = None
assert isinstance(User.fake(), User)
|
AttributeError: 'NoneType' object has no attribute 'fake'
**Describe the bug**
In my pydantic data model I have the following attribute `X: List[Optional[types.Float32]]`. When I call fake() on this class I get the error `AttributeError: 'NoneType' object has no attribute 'fake'`.
**To Reproduce**
```py
from dataclasses_avroschema import types
from dataclasses_avroschema.avrodantic import AvroBaseModel
from typing import List, Optional, Union
class Model(AvroBaseModel):
A: str
B: types.Float32
X: List[Optional[types.Float32]]
Y: List[Union[types.Float32, None]]
print(Model.fake())
```
**Expected behavior**
`A='BCGUpoVuoBhZZsPxLBTL' B=3.97300348265236 X=[0.22, 0.3, None] Y=[90.4, 3472.0, None]`
|
0.0
|
b8ccaaac4c38168644f8e4c60e50f2f97246b9eb
|
[
"tests/fake/test_fake.py::test_self_one_to_many_relationship"
] |
[
"tests/fake/test_fake.py::test_fake_primitive_types",
"tests/fake/test_fake.py::test_fake_complex_types",
"tests/fake/test_fake.py::test_fake_with_user_data",
"tests/fake/test_fake.py::test_fake_with_logical_types",
"tests/fake/test_fake.py::test_fake_union",
"tests/fake/test_fake.py::test_fake_one_to_one_relationship",
"tests/fake/test_fake.py::test_fake_one_to_many_relationship",
"tests/fake/test_fake.py::test_fake_one_to_many_map_relationship",
"tests/fake/test_fake.py::test_self_one_to_one_relationship",
"tests/fake/test_fake.py::test_self_one_to_many_map_relationship",
"tests/fake/test_fake.py::test_optional_relationship",
"tests/fake/test_fake.py::test_decimals",
"tests/fake/test_fake.py::test_int32",
"tests/fake/test_fake.py::test_float32"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-02-16 07:56:07+00:00
|
mit
| 3,718 |
|
marcosschroh__dataclasses-avroschema-253
|
diff --git a/dataclasses_avroschema/exceptions.py b/dataclasses_avroschema/exceptions.py
index f694907..079718a 100644
--- a/dataclasses_avroschema/exceptions.py
+++ b/dataclasses_avroschema/exceptions.py
@@ -1,22 +1,6 @@
import typing
-class NameSpaceRequiredException(Exception):
- def __init__(self, field_type: typing.Any, field_name: str) -> None:
- self.field_type = field_type
- self.field_name = field_name
-
- def __repr__(self) -> str:
- class_name = self.__class__.__name__
- return f"{class_name} {self.field_name},{self.field_type}"
-
- def __str__(self) -> str:
- return ( # pragma: no cover
- f"Required namespace in Meta for type {self.field_type}. "
- f"The field {self.field_name} is using an exiting type"
- )
-
-
class InvalidMap(Exception):
def __init__(self, field_name: str, key_type: typing.Any) -> None:
self.field_name = field_name
diff --git a/dataclasses_avroschema/fields.py b/dataclasses_avroschema/fields.py
index 74a22a9..fb4d62a 100644
--- a/dataclasses_avroschema/fields.py
+++ b/dataclasses_avroschema/fields.py
@@ -20,7 +20,7 @@ from pytz import utc
from dataclasses_avroschema import schema_generator, serialization, types, utils
from . import field_utils
-from .exceptions import InvalidMap, NameSpaceRequiredException
+from .exceptions import InvalidMap
from .types import JsonDict
PY_VER = sys.version_info
@@ -483,8 +483,9 @@ class EnumField(BaseField):
else:
namespace = metadata.get("namespace")
if namespace is None:
- raise NameSpaceRequiredException(field_type=self.type, field_name=name)
- return f"{namespace}.{name}"
+ return name
+ else:
+ return f"{namespace}.{name}"
def get_default_value(self) -> typing.Union[str, dataclasses._MISSING_TYPE, None]:
if self.default == types.MissingSentinel:
@@ -770,8 +771,9 @@ class RecordField(BaseField):
record_type["name"] = name
else:
if metadata.namespace is None:
- raise NameSpaceRequiredException(field_type=self.type, field_name=self.name)
- record_type = f"{metadata.namespace}.{name}"
+ record_type = name
+ else:
+ record_type = f"{metadata.namespace}.{name}"
if self.default is None:
return [field_utils.NULL, record_type]
diff --git a/docs/complex_types.md b/docs/complex_types.md
index 8e7f9b0..54cb245 100644
--- a/docs/complex_types.md
+++ b/docs/complex_types.md
@@ -71,7 +71,8 @@ User.avro_schema()
### Repeated Enums
-Sometimes we have cases where an `Enum` is used more than once with a particular class, for those cases, you `MUST` define the namespace in order to generate a valid `avro schema`
+Sometimes we have cases where an `Enum` is used more than once within a particular class; in those cases the same `type` is reused in order to generate a valid schema.
+It is a good practice but *NOT* necessary to define the `namespace` on the repeated `type`.
```python
import enum
@@ -123,7 +124,7 @@ resulting in
"name": "optional_distance",
"type": [
"null",
- "trip.TripDistance"
+ "trip.TripDistance" // using the namespace and the TripDistance type
],
"default": null
}
@@ -132,9 +133,6 @@ resulting in
}
```
-!!! warning
- If you do not specify the `namespace` in the `Enum` the exception `NameSpaceRequiredException` is raised
-
## Arrays
```python title="Array example"
diff --git a/docs/good_practices.md b/docs/good_practices.md
index 5a8f5e2..c632df3 100644
--- a/docs/good_practices.md
+++ b/docs/good_practices.md
@@ -1,6 +1,6 @@
-## Streaming
+# Streaming
-### Schema server and AvroModel
+## Schema server and AvroModel
First, let's clarify what a schema server is: It is a `central place/repository` that contains schemas with formats like `avro`, `json` or `protobuf`, with the purpose of exposing them through an `API`, so applications can access them and `serialize/deserialize` events. The schema server could have a `RESTful` interface so tasks like `create`, `delete` `get` schemas can be performed easily.
@@ -29,8 +29,7 @@ class User(AvroModel):
The purpose of the `schema_id` is to give a fast notion what the model is representing. Also, could be used as `documentation`
-
-### Include event metadata
+## Include event metadata
`avro schemas` are used widely in `streaming` to `serialize` events, and with `dataclasses-avroschemas` it is straightforward. Once
that you have the event, it is a good practice to also add the `event metadata` at the moment of `producing` so `consumers` will know what to do.
@@ -56,7 +55,7 @@ class User(AvroModel):
money: float = 100.3
class Meta:
- schema_id = "https://my-schema-server/users/schema.avsc" # or in a Concluent way: https://my-schema-server/schemas/ids/{int: id}
+ schema_id = "https://my-schema-server/users/schema.avsc" # or in a Confluent way: https://my-schema-server/schemas/ids/{int: id}
async def produce():
@@ -80,4 +79,14 @@ async def produce():
if __name__ == "__main__":
asyncio.run(produce)
-```
\ No newline at end of file
+```
+
+## Define Namespaces
+
+When there are types that are used more than once in a schema, for example `records` and `enums`, it is a good practice to define a `namespace` for the repeated type.
+This will allow you to identify the `types` more easily, especially if you have all the schemas in a `schema server` like `confluent`.
+
+Use cases:
+
+- [Reusing types with records](https://marcosschroh.github.io/dataclasses-avroschema/schema_relationships/#avoid-name-collision-in-multiple-relationships)
+- [Reusing types with enums](https://marcosschroh.github.io/dataclasses-avroschema/complex_types/#repeated-enums)
diff --git a/docs/model_generator.md b/docs/model_generator.md
index cee7009..acec709 100644
--- a/docs/model_generator.md
+++ b/docs/model_generator.md
@@ -11,7 +11,7 @@ The rendered result is a string that contains the proper identation, so the resu
In future releases it will be possible to generate models for other programming langagues like `java` and `rust`
!!! note
- You can also use [dc-avro](https://github.com/marcosschroh/dc-avro)d to generate the models from the command line
+ You can also use [dc-avro](https://github.com/marcosschroh/dc-avro) to generate the models from the command line
## Usage
diff --git a/docs/schema_relationships.md b/docs/schema_relationships.md
index 9f4bbba..39cf3df 100644
--- a/docs/schema_relationships.md
+++ b/docs/schema_relationships.md
@@ -278,11 +278,10 @@ User.avro_schema()
## Avoid name collision in multiple relationships
-Sometimes we have relationships where a class is related more than once with a particular class,
-and the name for the nested schemas must be different, otherwise we will generate an invalid `avro schema`.
-For those cases, you *MUST* define the `namespace`.
+Sometimes we have relationships where a class is related more than once with a particular class.
+In those cases, the predefined `type` is reused in order to generate a valid schema. It is a good practice but *NOT* necessary to define the `namespace` on the repeated `type`.
-```python title="Avoiding name collision example"
+```python title="Repeated types"
from dataclasses import dataclass
from datetime import datetime
import json
@@ -296,7 +295,7 @@ class Location(AvroModel):
longitude: float
class Meta:
- namespace = "types.location_type"
+        namespace = "types.location_type" # Good practice to use `namespaces`
@dataclass
class Trip(AvroModel):
@@ -333,7 +332,7 @@ Trip.avro_schema()
"type": {"type": "long", "logicalType": "timestamp-millis"}
},
{
- "name": "finish_location", "type": "types.location_type.Location" // using the namespace
+ "name": "finish_location", "type": "types.location_type.Location" // using the namespace and the Location type
}
],
"doc": "Trip(start_time: datetime.datetime, start_location: __main__.Location, finish_time: datetime.datetime, finish_location: __main__.Location)"
|
marcosschroh/dataclasses-avroschema
|
d2334ff743df40cf11ad427cab45de9b714c6920
|
diff --git a/tests/schemas/test_fastavro_paser_schema.py b/tests/schemas/test_fastavro_paser_schema.py
index 81ae619..5c09b80 100644
--- a/tests/schemas/test_fastavro_paser_schema.py
+++ b/tests/schemas/test_fastavro_paser_schema.py
@@ -152,6 +152,19 @@ def test_one_to_one_repeated_schema():
assert Trip.fake()
+def test_repeated_schema_without_namespace():
+ class Bus(AvroModel):
+ "A Bus"
+ engine_name: str
+
+ class UnionSchema(AvroModel):
+ "Some Unions"
+ bus_one: Bus
+ bus_two: Bus
+
+ parse_schema(UnionSchema.avro_schema_to_python())
+
+
def test_one_to_one_repeated_schema_in_array():
"""
Test relationship one-to-one with more than once schema
diff --git a/tests/schemas/test_schema.py b/tests/schemas/test_schema.py
index bde9754..9582de6 100644
--- a/tests/schemas/test_schema.py
+++ b/tests/schemas/test_schema.py
@@ -5,7 +5,7 @@ from dataclasses import dataclass
import pytest
from fastavro.validation import ValidationError
-from dataclasses_avroschema import AvroModel, exceptions
+from dataclasses_avroschema import AvroModel
from dataclasses_avroschema.schema_definition import BaseSchemaDefinition
from dataclasses_avroschema.types import JsonDict
@@ -123,20 +123,6 @@ def test_not_implementd_methods():
assert msg == str(excinfo.value)
-def test_namespace_required():
- class Bus(AvroModel):
- "A Bus"
- engine_name: str
-
- class UnionSchema(AvroModel):
- "Some Unions"
- bus_one: Bus
- bus_two: Bus
-
- with pytest.raises(exceptions.NameSpaceRequiredException):
- assert UnionSchema.avro_schema()
-
-
def test_inherit_dataclass_missing_docs():
@dataclass
class BaseUser:
diff --git a/tests/schemas/test_schema_with_complex_types.py b/tests/schemas/test_schema_with_complex_types.py
index 1fa4f1b..d8d6755 100644
--- a/tests/schemas/test_schema_with_complex_types.py
+++ b/tests/schemas/test_schema_with_complex_types.py
@@ -9,7 +9,7 @@ import uuid
import pytest
from fastavro import parse_schema
-from dataclasses_avroschema import AvroModel, exceptions
+from dataclasses_avroschema import AvroModel
from dataclasses_avroschema.types import JsonDict
PY_VER = sys.version_info
@@ -155,7 +155,7 @@ def test_schema_with_new_unions_defaults_syntax(default_union_schema: JsonDict)
assert User.avro_schema() == json.dumps(default_union_schema)
-def test_enum_namespace_required() -> None:
+def test_repeated_enum_without_namespace() -> None:
class UserType(enum.Enum):
BASIC = "Basic"
PREMIUM = "Premium"
@@ -165,8 +165,7 @@ def test_enum_namespace_required() -> None:
user_type: UserType
user_type_optional: typing.Optional[UserType]
- with pytest.raises(exceptions.NameSpaceRequiredException):
- User.avro_schema()
+ parse_schema(User.avro_schema_to_python())
# This is to explicitly test the behavior for a typing.Optional[T] field with no default
|
NameSpaceRequiredException when reusing child class
Hi there! First off, thank you for the fantastic library. 🍰
I'm running into the following issue in a migration to dataclasses-avroschema:
```python
from dataclasses_avroschema.avrodantic import AvroBaseModel
class ChildA(AvroBaseModel):
id: int
class ChildB(AvroBaseModel):
id: int
class Parent(AvroBaseModel):
x: ChildA | ChildB
y: ChildA | ChildB
class Meta:
namespace = "com.example"
d = Parent(x=ChildA(id=42), y=ChildB(id=43))
print(d.serialize(serialization_type="avro-json"))
```
```
File "C:\Python3.11\Lib\site-packages\dataclasses_avroschema\fields.py", line 773, in get_avro_type
raise NameSpaceRequiredException(field_type=self.type, field_name=self.name)
dataclasses_avroschema.exceptions.NameSpaceRequiredException: Required namespace in Meta for type <class '__main__.ChildA'>. The field y is using an exiting type
```
Removing `y` fixes the issue and correctly produces:
```json
{"x": {"com.example.ChildA": {"id": 42}}}
```
The problem seems to be that `exist_type` turns `True`, but I don't understand enough of the codebase to know what the correct fix is. 😃
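For reference, a minimal sketch of the fix direction eventually taken (see the patch above): when an existing type is reused and no namespace is configured, fall back to the bare type name, which Avro accepts as a reference to an already-defined type, instead of raising:
```python
from typing import Optional


def reference_for_existing_type(name: str, namespace: Optional[str]) -> str:
    # Avro allows referring to an already-defined type by its name alone;
    # a namespace only disambiguates, so it does not need to be mandatory.
    if namespace is None:
        return name
    return f"{namespace}.{name}"


assert reference_for_existing_type("ChildA", None) == "ChildA"
assert reference_for_existing_type("ChildA", "com.example") == "com.example.ChildA"
```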
|
0.0
|
d2334ff743df40cf11ad427cab45de9b714c6920
|
[
"tests/schemas/test_fastavro_paser_schema.py::test_repeated_schema_without_namespace",
"tests/schemas/test_schema_with_complex_types.py::test_repeated_enum_without_namespace"
] |
[
"tests/schemas/test_fastavro_paser_schema.py::test_minimal_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_with_field_metadata",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_with_extra_avro_attrs",
"tests/schemas/test_fastavro_paser_schema.py::test_advance_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_advance_schema_with_defaults",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_repeated_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_repeated_schema_in_array",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_repeated_schema_in_map",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_repeated_schema_in_array_and_map",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_with_map_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_one_self_relationship",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_self_reference_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_one_to_many_self_reference_map_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_logical_types_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_logical_micro_types_schema",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_with_union_types",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_with_union_record_types",
"tests/schemas/test_fastavro_paser_schema.py::test_schema_array_with_union_types",
"tests/schemas/test_fastavro_paser_schema.py::test_namespaces",
"tests/schemas/test_fastavro_paser_schema.py::test_use_of_same_type_in_nested_list",
"tests/schemas/test_fastavro_paser_schema.py::test_two_different_child_records",
"tests/schemas/test_fastavro_paser_schema.py::test_nested_schemas_splitted",
"tests/schemas/test_fastavro_paser_schema.py::test_nested_scheamas_splitted_with_intermediates",
"tests/schemas/test_schema.py::test_total_schema_fields_from_class",
"tests/schemas/test_schema.py::test_total_schema_fields_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_field_metadata",
"tests/schemas/test_schema.py::test_schema_render_from_class",
"tests/schemas/test_schema.py::test_schema_render_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_doc",
"tests/schemas/test_schema.py::test_schema_render_from_instance_with_doc",
"tests/schemas/test_schema.py::test_schema_doc_from_meta",
"tests/schemas/test_schema.py::test_schema_cached",
"tests/schemas/test_schema.py::test_extra_avro_attributes",
"tests/schemas/test_schema.py::test_class_empty_metaclass",
"tests/schemas/test_schema.py::test_invalid_schema_type",
"tests/schemas/test_schema.py::test_not_implementd_methods",
"tests/schemas/test_schema.py::test_inherit_dataclass_missing_docs",
"tests/schemas/test_schema.py::test_get_fields",
"tests/schemas/test_schema.py::test_schema_name_from_relationship",
"tests/schemas/test_schema.py::test_alias_from_relationship",
"tests/schemas/test_schema.py::test_validate",
"tests/schemas/test_schema.py::test_get_enum_type_map",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_unions",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_sub_record",
"tests/schemas/test_schema.py::test_deserialize_complex_types",
"tests/schemas/test_schema.py::test_deserialize_complex_types_invalid_enum_instance",
"tests/schemas/test_schema.py::test_parse_obj",
"tests/schemas/test_schema.py::test_avro_schema_to_python_method_with_inheritance",
"tests/schemas/test_schema.py::test_avro_schema_method_with_inheritance",
"tests/schemas/test_schema_with_complex_types.py::test_schema_with_complex_types",
"tests/schemas/test_schema_with_complex_types.py::test_schema_with_complex_types_and_defaults",
"tests/schemas/test_schema_with_complex_types.py::test_schema_with_unions_type",
"tests/schemas/test_schema_with_complex_types.py::test_schema_with_unions_defaults",
"tests/schemas/test_schema_with_complex_types.py::test_schema_typing_optional_behavior"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-06 14:04:44+00:00
|
mit
| 3,719 |
|
marcosschroh__dataclasses-avroschema-383
|
diff --git a/dataclasses_avroschema/fields/fields.py b/dataclasses_avroschema/fields/fields.py
index b7fee26..56b3a9e 100644
--- a/dataclasses_avroschema/fields/fields.py
+++ b/dataclasses_avroschema/fields/fields.py
@@ -692,7 +692,11 @@ class RecordField(Field):
return record_type
def default_to_avro(self, value: "schema_generator.AvroModel") -> typing.Dict:
- return value.to_dict()
+ schema_def = value.schema_def or value._generate_avro_schema()
+ return {
+ fieldname: field.default_to_avro(getattr(value, fieldname))
+ for fieldname, field in schema_def.get_fields_map().items()
+ }
def fake(self) -> typing.Any:
return self.type.fake()
diff --git a/dataclasses_avroschema/schema_definition.py b/dataclasses_avroschema/schema_definition.py
index e988ca2..50bec76 100644
--- a/dataclasses_avroschema/schema_definition.py
+++ b/dataclasses_avroschema/schema_definition.py
@@ -170,3 +170,6 @@ class AvroSchemaDefinition(BaseSchemaDefinition):
schema["aliases"] = self.metadata.aliases
return schema
+
+ def get_fields_map(self) -> typing.Dict[str, Field]:
+ return self.fields_map
|
marcosschroh/dataclasses-avroschema
|
883d147c3a8c2f2bd5e854741cb3374abc18f3e5
|
diff --git a/tests/fields/test_BaseField.py b/tests/fields/test_BaseField.py
index d8ae238..4c913eb 100644
--- a/tests/fields/test_BaseField.py
+++ b/tests/fields/test_BaseField.py
@@ -1,4 +1,7 @@
-from dataclasses_avroschema import AvroField
+import dataclasses
+from datetime import datetime
+
+from dataclasses_avroschema import AvroField, AvroModel, utils
def test_render():
@@ -61,3 +64,40 @@ def test_render_metadata():
expected = [("encoding", "some_exotic_encoding"), ("doc", "Official Breed Name")]
assert expected == field.get_metadata()
+
+
+def test_render_complex_types():
+ @dataclasses.dataclass
+ class Metadata(AvroModel):
+ timestamp: datetime = dataclasses.field(
+ default_factory=lambda: datetime(2023, 10, 21, 11, 11),
+ )
+
+ parent = AvroModel()
+ parent.metadata = utils.SchemaMetadata.create(type)
+ field = AvroField(
+ "metadata",
+ Metadata,
+ metadata={"desc": "Some metadata"},
+ default_factory=Metadata,
+ parent=parent,
+ )
+
+ expected = {
+ "desc": "Some metadata",
+ "name": "metadata",
+ "type": {
+ "type": "record",
+ "name": "Metadata",
+ "fields": [
+ {
+ "name": "timestamp",
+ "type": {"type": "long", "logicalType": "timestamp-millis"},
+ "default": 1697886660000,
+ }
+ ],
+ },
+ "default": {"timestamp": 1697886660000},
+ }
+
+ assert expected == dict(field.render())
|
RecordField default_to_avro produces non-serializable output
**Describe the bug**
When a model is nested, the model's avro_schema function fails if the nested attribute contains a non-JSON-serializable field, e.g. datetime.
**To Reproduce**
```python
import dataclasses
from dataclasses_avroschema import AvroModel
from datetime import datetime
@dataclasses.dataclass
class Metadata(AvroModel):
event_time: datetime = dataclasses.field(default_factory=datetime.utcnow, default=dataclasses.MISSING)
@dataclasses.dataclass
class Record(AvroModel):
metadata: Metadata = dataclasses.field(default_factory=lambda: Metadata(), default=dataclasses.MISSING)
Record()
# Out: Record(metadata=Metadata(event_time=datetime.datetime(2023, 8, 5, 7, 32, 27, 798392)))
Record.avro_schema()
# Fails with: TypeError: Object of type datetime is not JSON serializable
```
**Expected behavior**
Should produce the avro schema with the datetime default rendered as a string
As far as I understand, this is due to `RecordField.default_to_avro` simply calling the `to_dict` function on the value provided.
As far as I can tell, `default_to_avro` is only called on rendering, so it should be fine to do `json.loads(value.to_json())` here.
Edit: fixed suggested fix to use `to_json()`
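A minimal sketch of that workaround (the function name is illustrative; `value` is assumed to be an `AvroModel` instance exposing `to_json()`, as in the issue):

```python
import json

def record_default_to_avro(value):
    # Round-trip through JSON so logical types such as datetime come out in
    # their Avro form (e.g. timestamp-millis as an int) rather than as raw
    # Python objects that json.dumps cannot serialize.
    return json.loads(value.to_json())
```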
|
0.0
|
883d147c3a8c2f2bd5e854741cb3374abc18f3e5
|
[
"tests/fields/test_BaseField.py::test_render_complex_types"
] |
[
"tests/fields/test_BaseField.py::test_render",
"tests/fields/test_BaseField.py::test_render_metadata"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-05 10:01:47+00:00
|
mit
| 3,720 |
|
marcosschroh__dataclasses-avroschema-402
|
diff --git a/dataclasses_avroschema/model_generator/generator.py b/dataclasses_avroschema/model_generator/generator.py
index 57290e3..acce0e1 100644
--- a/dataclasses_avroschema/model_generator/generator.py
+++ b/dataclasses_avroschema/model_generator/generator.py
@@ -424,10 +424,18 @@ class ModelGenerator:
field_name: str = field["name"]
enum_name = casefy.pascalcase(field_name)
+
+ symbols_map = {}
+ for symbol in field["symbols"]:
+ key = casefy.uppercase(symbol)
+ if key in symbols_map:
+ key = symbol
+ symbols_map[key] = symbol
+
symbols = self.field_identation.join(
[
- templates.enum_symbol_template.safe_substitute(key=casefy.uppercase(symbol), value=f'"{symbol}"')
- for symbol in field["symbols"]
+ templates.enum_symbol_template.safe_substitute(key=key, value=f'"{value}"')
+ for key, value in symbols_map.items()
]
)
docstring = self.render_docstring(docstring=field.get("doc"))
diff --git a/docs/model_generator.md b/docs/model_generator.md
index 5b2975c..351e554 100644
--- a/docs/model_generator.md
+++ b/docs/model_generator.md
@@ -411,3 +411,58 @@ class User(AvroModel):
class Meta:
field_order = ['has_pets', 'name', 'age', 'money']
```
+
+## Enums and case sensitivity
+
+Sometimes there are schemas whose `symbols` are case sensitive, for example `"symbols": ["P", "p"]`.
+Having something like that is NOT recommended at all, because it is meaningless and the intention behind it is really hard to understand. Avoid it!!!
+
+When the schema generator encounters this situation it cannot generate the proper `enum` with `uppercase` keys, so it will use the `symbol` without any transformation
+
+```python
+from dataclasses_avroschema import ModelGenerator
+
+schema = {
+ "type": "record",
+ "name": "User",
+ "fields": [
+ {
+ "name": "unit_multi_player",
+ "type": {
+ "type": "enum",
+ "name": "unit_multi_player",
+ "symbols": ["Q", "q"],
+ },
+ }
+ ],
+}
+
+model_generator = ModelGenerator()
+result = model_generator.render(schema=schema)
+
+# save the result in a file
+with open("models.py", mode="+w") as f:
+ f.write(result)
+```
+
+Then the result will be:
+
+```python
+# models.py
+from dataclasses_avroschema import AvroModel
+import dataclasses
+import enum
+
+
+class UnitMultiPlayer(enum.Enum):
+ Q = "Q"
+ q = "q"
+
+
[email protected]
+class User(AvroModel):
+ unit_multi_player: UnitMultiPlayer
+
+```
+
+As the example shows, the second enum member `UnitMultiPlayer.q` is not uppercased, as it would otherwise collide with the first member `UnitMultiPlayer.Q`
|
marcosschroh/dataclasses-avroschema
|
d1e856e0a7c6c610741f54061f17df2aa8d9180e
|
diff --git a/tests/model_generator/conftest.py b/tests/model_generator/conftest.py
index bec01d9..58873a6 100644
--- a/tests/model_generator/conftest.py
+++ b/tests/model_generator/conftest.py
@@ -212,6 +212,24 @@ def schema_with_enum_types() -> Dict:
}
[email protected]
+def schema_with_enum_types_case_sensitivity() -> Dict:
+ return {
+ "type": "record",
+ "name": "User",
+ "fields": [
+ {
+ "name": "unit_multi_player",
+ "type": {
+ "type": "enum",
+ "name": "unit_multi_player",
+ "symbols": ["Q", "q"],
+ },
+ }
+ ],
+ }
+
+
@pytest.fixture
def schema_one_to_one_relationship() -> JsonDict:
return {
diff --git a/tests/model_generator/test_model_generator.py b/tests/model_generator/test_model_generator.py
index a3d3e6d..a0a9279 100644
--- a/tests/model_generator/test_model_generator.py
+++ b/tests/model_generator/test_model_generator.py
@@ -204,6 +204,28 @@ class User(AvroModel):
assert result.strip() == expected_result.strip()
+def test_schema_with_enum_types_case_sensitivity(schema_with_enum_types_case_sensitivity: types.JsonDict) -> None:
+ expected_result = """
+from dataclasses_avroschema import AvroModel
+import dataclasses
+import enum
+
+
+class UnitMultiPlayer(enum.Enum):
+ Q = "Q"
+ q = "q"
+
+
[email protected]
+class User(AvroModel):
+ unit_multi_player: UnitMultiPlayer
+
+"""
+ model_generator = ModelGenerator()
+ result = model_generator.render(schema=schema_with_enum_types_case_sensitivity)
+ assert result.strip() == expected_result.strip()
+
+
def test_schema_one_to_one_relationship(schema_one_to_one_relationship: types.JsonDict) -> None:
expected_result = """
from dataclasses_avroschema import AvroModel
|
Issue with case sensitivity in enums
**Describe the bug**
Python fields that represent enum values are translated directly to UPPERCASE regardless of their original casing.
This results in duplicate fields if original values from the schema only differ in casing.
I encountered this issue in our standards-derived Avro models that use SI units, which have quite a few cases where this is a problem, like M(ega) vs m(illi), M(onth) vs m(inute), etc.
```
{
"name": "UnitMultiplier",
"type": "enum",
"symbols": [
"p",
"P",
....
]
}
```
results in
```
class UnitMultiplier(enum.Enum):
P = "p"
P = "P"
...
```
Perhaps rename the non-original uppercase P field to 'P_' or something when duplicates are found.
**To Reproduce**
See above
**Expected behavior**
Support for case-sensitive uniqueness such that any valid avro schema results in a valid generated python model.
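A minimal sketch of a collision-aware symbol mapping, mirroring the patch above (`casefy` is the casing library the generator already uses):

```python
import casefy

def build_symbol_map(symbols):
    symbols_map = {}
    for symbol in symbols:
        key = casefy.uppercase(symbol)
        if key in symbols_map:  # uppercasing would collide with an earlier member
            key = symbol        # so keep the original casing for this one
        symbols_map[key] = symbol
    return symbols_map

print(build_symbol_map(["Q", "q"]))  # {'Q': 'Q', 'q': 'q'}
```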
|
0.0
|
d1e856e0a7c6c610741f54061f17df2aa8d9180e
|
[
"tests/model_generator/test_model_generator.py::test_schema_with_enum_types_case_sensitivity"
] |
[
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types",
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types_as_defined_types",
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types_with_default_null",
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types_with_unions",
"tests/model_generator/test_model_generator.py::test_model_generator_array_type",
"tests/model_generator/test_model_generator.py::test_model_generator_map_type",
"tests/model_generator/test_model_generator.py::test_schema_with_fixed_types",
"tests/model_generator/test_model_generator.py::test_schema_with_enum_types",
"tests/model_generator/test_model_generator.py::test_schema_one_to_one_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_many_array_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_many_map_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_self_relationship",
"tests/model_generator/test_model_generator.py::test_decimal_field",
"tests/model_generator/test_model_generator.py::test_schema_logical_types",
"tests/model_generator/test_model_generator.py::test_field_order",
"tests/model_generator/test_model_generator.py::test_model_generator_render_module_from_multiple_schemas"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-30 13:21:25+00:00
|
mit
| 3,721 |
|
marcosschroh__dataclasses-avroschema-403
|
diff --git a/dataclasses_avroschema/model_generator/generator.py b/dataclasses_avroschema/model_generator/generator.py
index acce0e1..6da2653 100644
--- a/dataclasses_avroschema/model_generator/generator.py
+++ b/dataclasses_avroschema/model_generator/generator.py
@@ -275,14 +275,16 @@ class ModelGenerator:
result = templates.field_template.safe_substitute(name=name, type=language_type)
# optional field attribute
- default = self.get_field_default(field_type=type, default=default, name=name, field_metadata=field_metadata)
+ default_generated = self.get_field_default(
+ field_type=type, default=default, name=name, field_metadata=field_metadata
+ )
has_default = False
- if default is not dataclasses.MISSING:
- has_default = True
-
+ if default_generated is not dataclasses.MISSING:
if type != field_utils.DECIMAL:
- result += templates.field_default_template.safe_substitute(default=default)
+ result += templates.field_default_template.safe_substitute(default=default_generated)
+ if default is not dataclasses.MISSING:
+ has_default = True
return FieldRepresentation(name=name, string_representation=result, has_default=has_default)
|
marcosschroh/dataclasses-avroschema
|
f009f4a98e50c5051dbc016f4f2c5724150c0b53
|
diff --git a/tests/model_generator/conftest.py b/tests/model_generator/conftest.py
index 58873a6..35b7af6 100644
--- a/tests/model_generator/conftest.py
+++ b/tests/model_generator/conftest.py
@@ -13,10 +13,10 @@ def schema() -> Dict:
"fields": [
{"name": "age", "type": "int"},
{"name": "money_available", "type": "double"},
+ {"name": "weight", "type": "int", "unit": "kg"},
{"name": "name", "type": "string", "default": "marcos"},
{"name": "pet_age", "type": "int", "default": 1},
{"name": "height", "type": "float", "default": 10.10},
- {"name": "weight", "type": "int", "unit": "kg"},
{"name": "expirience", "type": "int", "unit": "years", "default": 10},
{
"name": "is_student",
@@ -502,3 +502,15 @@ def schema_with_pydantic_constrained_fields() -> JsonDict:
{"pydantic-class": "conint(gt=10, lt=20)", "name": "constrained_int", "type": "int"},
],
}
+
+
[email protected]
+def with_fields_with_metadata() -> JsonDict:
+ return {
+ "type": "record",
+ "name": "Message",
+ "fields": [
+ {"name": "fieldwithdefault", "type": "string", "default": "some default value"},
+ {"name": "someotherfield", "type": "long", "aliases": ["oldname"], "doc": "test"},
+ ],
+ }
diff --git a/tests/model_generator/test_model_generator.py b/tests/model_generator/test_model_generator.py
index a0a9279..5d01171 100644
--- a/tests/model_generator/test_model_generator.py
+++ b/tests/model_generator/test_model_generator.py
@@ -17,10 +17,10 @@ class User(AvroModel):
\"""
age: types.Int32
money_available: float
+ weight: types.Int32 = dataclasses.field(metadata={'unit': 'kg'})
name: str = "marcos"
pet_age: types.Int32 = 1
height: types.Float32 = 10.1
- weight: types.Int32 = dataclasses.field(metadata={'unit': 'kg'})
expirience: types.Int32 = dataclasses.field(metadata={'unit': 'years'}, default=10)
is_student: bool = True
encoded: bytes = b"Hi"
@@ -446,10 +446,10 @@ class User(AvroModel):
\"""
age: types.Int32
money_available: float
+ weight: types.Int32 = dataclasses.field(metadata={'unit': 'kg'})
name: str = "marcos"
pet_age: types.Int32 = 1
height: types.Float32 = 10.1
- weight: types.Int32 = dataclasses.field(metadata={'unit': 'kg'})
expirience: types.Int32 = dataclasses.field(metadata={'unit': 'years'}, default=10)
is_student: bool = True
encoded: bytes = b"Hi"
@@ -471,3 +471,23 @@ class Address(AvroModel):
model_generator = ModelGenerator()
result = model_generator.render_module(schemas=[schema, schema_2])
assert result.strip() == expected_result.strip()
+
+
+def test_model_generator_with_fields_with_metadata(with_fields_with_metadata: types.JsonDict) -> None:
+ expected_result = """
+from dataclasses_avroschema import AvroModel
+import dataclasses
+
+
[email protected]
+class Message(AvroModel):
+ someotherfield: int = dataclasses.field(metadata={'aliases': ['oldname'], 'doc': 'test'})
+ fieldwithdefault: str = "some default value"
+
+ class Meta:
+ field_order = ['fieldwithdefault', 'someotherfield']
+
+"""
+ model_generator = ModelGenerator()
+ result = model_generator.render(schema=with_fields_with_metadata)
+ assert result.strip() == expected_result.strip()
|
field ordering issue causing invalid python models
**Describe the bug**
Whenever there is a field with some additional metadata like "aliases" or "doc", the generated Python field looks like this:
```
fieldwithdefault: str = "some default value"
someotherfield: int = dataclasses.field(metadata={'aliases': ['oldname']})
```
However, these fields get ordered after fields with a default value (in their original order?), which is not valid Python. Without aliases or doc, these fields do correctly get shifted before fields with defaults.
**To Reproduce**
```
{
"type": "record",
"name": "Message",
"fields": [
{
"name": "fieldwithdefault",
"type": "string",
"default": "some default value"
},
{
"name": "someotherfield",
"type": "long",
"aliases": ["oldname"],
"doc": "test"
}
]
}
```
**Expected behavior**
fields get ordered with default values last
```
someotherfield: int = dataclasses.field(metadata={'aliases': ['oldname']})
fieldwithdefault: str = "some default value"
```
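For reference, a minimal standalone sketch (not project code) of why this ordering matters for the generated dataclasses:

```python
import dataclasses

try:
    @dataclasses.dataclass
    class Broken:  # the ordering currently generated
        fieldwithdefault: str = "some default value"
        someotherfield: int = dataclasses.field(metadata={'aliases': ['oldname']})
except TypeError as exc:
    print(exc)  # non-default argument 'someotherfield' follows default argument

@dataclasses.dataclass
class Message:  # the expected ordering: fields without defaults come first
    someotherfield: int = dataclasses.field(metadata={'aliases': ['oldname']})
    fieldwithdefault: str = "some default value"
```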
|
0.0
|
f009f4a98e50c5051dbc016f4f2c5724150c0b53
|
[
"tests/model_generator/test_model_generator.py::test_model_generator_with_fields_with_metadata"
] |
[
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types",
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types_as_defined_types",
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types_with_default_null",
"tests/model_generator/test_model_generator.py::test_model_generator_primitive_types_with_unions",
"tests/model_generator/test_model_generator.py::test_model_generator_array_type",
"tests/model_generator/test_model_generator.py::test_model_generator_map_type",
"tests/model_generator/test_model_generator.py::test_schema_with_fixed_types",
"tests/model_generator/test_model_generator.py::test_schema_with_enum_types",
"tests/model_generator/test_model_generator.py::test_schema_with_enum_types_case_sensitivity",
"tests/model_generator/test_model_generator.py::test_schema_one_to_one_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_many_array_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_many_map_relationship",
"tests/model_generator/test_model_generator.py::test_schema_one_to_self_relationship",
"tests/model_generator/test_model_generator.py::test_decimal_field",
"tests/model_generator/test_model_generator.py::test_schema_logical_types",
"tests/model_generator/test_model_generator.py::test_field_order",
"tests/model_generator/test_model_generator.py::test_model_generator_render_module_from_multiple_schemas"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-08-30 14:13:37+00:00
|
mit
| 3,722 |
|
mardiros__aioxmlrpc-10
|
diff --git a/aioxmlrpc/client.py b/aioxmlrpc/client.py
index 35bca86..5ed292b 100644
--- a/aioxmlrpc/client.py
+++ b/aioxmlrpc/client.py
@@ -79,8 +79,14 @@ class AioTransport(xmlrpc.Transport):
raise
except Exception as exc:
log.error('Unexpected error', exc_info=True)
- raise ProtocolError(url, response.status,
- str(exc), response.headers)
+ if response is not None:
+ errcode = response.status
+ headers = response.headers
+ else:
+ errcode = 0
+ headers = {}
+
+ raise ProtocolError(url, errcode, str(exc), headers)
return self.parse_response(body)
def parse_response(self, body):
|
mardiros/aioxmlrpc
|
5480f35630d166bfa686e2e02b28c581e16bb723
|
diff --git a/aioxmlrpc/tests/test_client.py b/aioxmlrpc/tests/test_client.py
index 304045d..98fef6b 100644
--- a/aioxmlrpc/tests/test_client.py
+++ b/aioxmlrpc/tests/test_client.py
@@ -128,3 +128,28 @@ class ServerProxyTestCase(TestCase):
self.assertEqual(response, 1)
self.assertIs(self.loop, client._loop)
self.assertTrue(transp._connector.close.called)
+
+
[email protected]
+def failing_request(*args, **kwargs):
+ raise OSError
+
+
+class HTTPErrorTestCase(TestCase):
+
+ def setUp(self):
+ self.loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(None)
+ self.aiohttp_request = mock.patch('aiohttp.request', new=failing_request)
+ self.aiohttp_request.start()
+
+ def tearDown(self):
+ self.aiohttp_request.stop()
+
+ def test_http_error(self):
+ from aioxmlrpc.client import ServerProxy, ProtocolError
+ client = ServerProxy('http://nonexistent/nonexistent', loop=self.loop)
+ self.assertRaises(ProtocolError,
+ self.loop.run_until_complete,
+ client.name.space.proxfyiedcall()
+ )
|
AttributeError when server closes socket
If the XMLRPC server closes the socket, which is rather common if it's Apache, I get this error:
```
Traceback (most recent call last):
File "/usr/share/routest/env/lib/python3.5/site-packages/aioxmlrpc/client.py", line 71, in request
connector=self._connector, loop=self._loop)
File "/usr/share/routest/env/lib/python3.5/site-packages/aiohttp/client.py", line 605, in __iter__
return (yield from self._coro)
File "/usr/share/routest/env/lib/python3.5/site-packages/aiohttp/client.py", line 161, in _request
raise RuntimeError('Session is closed')
RuntimeError: Session is closed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/share/routest/env/lib/python3.5/site-packages/routest/resource/space/serializers.py", line 159, in check_job_status
self.job_status = await self.lava.scheduler.job_status(self.job_id)
File "/usr/share/routest/env/lib/python3.5/site-packages/aioxmlrpc/client.py", line 37, in __call__
ret = yield from self.__send(self.__name, args)
File "/usr/share/routest/env/lib/python3.5/site-packages/aioxmlrpc/client.py", line 130, in __request
verbose=self.__verbose
File "/usr/share/routest/env/lib/python3.5/site-packages/aioxmlrpc/client.py", line 80, in request
raise ProtocolError(url, response.status,
AttributeError: 'NoneType' object has no attribute 'status'
```
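A minimal sketch of a guard that avoids the secondary AttributeError (the helper name is illustrative, not aioxmlrpc API):

```python
from xmlrpc.client import ProtocolError

def raise_protocol_error(url, response, exc):
    # `response` is None when the request failed before any HTTP exchange
    # happened (e.g. "Session is closed"), so fall back to a dummy status
    # code and empty headers instead of dereferencing None.
    if response is not None:
        errcode, headers = response.status, response.headers
    else:
        errcode, headers = 0, {}
    raise ProtocolError(url, errcode, str(exc), headers)
```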
|
0.0
|
5480f35630d166bfa686e2e02b28c581e16bb723
|
[
"aioxmlrpc/tests/test_client.py::HTTPErrorTestCase::test_http_error"
] |
[
"aioxmlrpc/tests/test_client.py::ServerProxyTestCase::test_close_transport",
"aioxmlrpc/tests/test_client.py::ServerProxyTestCase::test_http_500",
"aioxmlrpc/tests/test_client.py::ServerProxyTestCase::test_xmlrpc_fault",
"aioxmlrpc/tests/test_client.py::ServerProxyTestCase::test_xmlrpc_ok",
"aioxmlrpc/tests/test_client.py::ServerProxyTestCase::test_xmlrpc_ok_global_loop"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-23 00:48:03+00:00
|
bsd-3-clause
| 3,723 |
|
markusressel__container-app-conf-61
|
diff --git a/Pipfile.lock b/Pipfile.lock
index 884d1ae..700bcba 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -42,11 +42,11 @@
},
"ruamel.yaml": {
"hashes": [
- "sha256:0850def9ebca23b3a8c64c4b4115ebb6b364a10d49f89d289a26ee965e1e7d9d",
- "sha256:8f1e15421668b9edf30ed02899f5f81aff9808a4271935776f61a99a569a13da"
+ "sha256:44bc6b54fddd45e4bc0619059196679f9e8b79c027f4131bb072e6a22f4d5e28",
+ "sha256:ac79fb25f5476e8e9ed1c53b8a2286d2c3f5dde49eb37dbcee5c7eb6a8415a22"
],
"index": "pypi",
- "version": "==0.17.2"
+ "version": "==0.17.4"
},
"ruamel.yaml.clib": {
"hashes": [
@@ -82,7 +82,7 @@
"sha256:e9f7d1d8c26a6a12c23421061f9022bb62704e38211fe375c645485f38df34a2",
"sha256:f6061a31880c1ed6b6ce341215336e2f3d0c1deccd84957b6fa8ca474b41e89f"
],
- "markers": "platform_python_implementation == 'CPython' and python_version < '3.10'",
+ "markers": "python_version < '3.10' and platform_python_implementation == 'CPython'",
"version": "==0.2.2"
},
"six": {
@@ -90,12 +90,14 @@
"sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
"sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.15.0"
},
"toml": {
"hashes": [
"sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c",
- "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e"
+ "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e",
+ "sha256:f1db651f9657708513243e61e6cc67d101a39bad662eaa9b5546f789338e07a3"
],
"index": "pypi",
"version": "==0.10.0"
@@ -115,6 +117,7 @@
"sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6",
"sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700"
],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==20.3.0"
},
"iniconfig": {
@@ -129,6 +132,7 @@
"sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
"sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==20.9"
},
"pluggy": {
@@ -136,6 +140,7 @@
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
"sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==0.13.1"
},
"py": {
@@ -143,6 +148,7 @@
"sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3",
"sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"
],
+ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.10.0"
},
"pyparsing": {
@@ -150,6 +156,7 @@
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
+ "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.4.7"
},
"pytest": {
@@ -163,7 +170,8 @@
"toml": {
"hashes": [
"sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c",
- "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e"
+ "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e",
+ "sha256:f1db651f9657708513243e61e6cc67d101a39bad662eaa9b5546f789338e07a3"
],
"index": "pypi",
"version": "==0.10.0"
diff --git a/README.md b/README.md
index caa85ed..6d2c98e 100644
--- a/README.md
+++ b/README.md
@@ -209,15 +209,14 @@ implementations are available:
Since you only specify the key path of a config entry the ENV
key is generated automatically by concatenating all key path items
-using an underscore and converting to uppercase:
+using an underscore, converting to uppercase and replacing any remaining
+hyphens also with an underscore:
```python
-key_path = ["my_app", "example"]
-env_key = "_".join(key_path).upper()
+key_path = ["my_app", "my-example"]
```
-yields `MY_APP_EXAMPLE`.
-
+would yield `MY_APP_MY_EXAMPLE`.
### Filesystem Source
diff --git a/container_app_conf/source/env_source.py b/container_app_conf/source/env_source.py
index 390dd54..49db3ed 100644
--- a/container_app_conf/source/env_source.py
+++ b/container_app_conf/source/env_source.py
@@ -31,11 +31,14 @@ class EnvSource(DataSource):
KEY_SPLIT_CHAR = "_"
def has(self, entry: ConfigEntry) -> bool:
- return self.env_key(entry) in self.root.keys()
+ original_key = self.env_key(entry)
+ normalized_key = original_key.replace('-', '_')
+ return original_key in self.root.keys() or normalized_key in self.root.keys()
def get(self, entry: ConfigEntry) -> str or None:
- key = self.env_key(entry)
- return self.root.get(key, None)
+ original_key = self.env_key(entry)
+ normalized_key = original_key.replace('-', '_')
+ return self.root.get(original_key, self.root.get(normalized_key, None))
@staticmethod
def env_key(entry: ConfigEntry) -> str:
|
markusressel/container-app-conf
|
e4ec0c8c51b5a460744c033bc19ab76b4f74617c
|
diff --git a/tests/data_source/source_test.py b/tests/data_source/source_test.py
index 8905fa7..280572c 100644
--- a/tests/data_source/source_test.py
+++ b/tests/data_source/source_test.py
@@ -17,11 +17,14 @@
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+import os
from typing import Dict
+from unittest import mock
from container_app_conf import ConfigEntry
from container_app_conf.entry.int import IntConfigEntry
from container_app_conf.entry.string import StringConfigEntry
+from container_app_conf.source.env_source import EnvSource
from container_app_conf.source.json_source import JsonSource
from container_app_conf.source.toml_source import TomlSource
from container_app_conf.source.yaml_source import YamlSource
@@ -110,3 +113,32 @@ class TestDataSource(TestBase):
self.assertEqual(source.get(str_entry), "value")
self.assertTrue(source.has(int_entry))
self.assertEqual(source.get(int_entry), 2)
+
+ def test_env(self):
+ str_entry = StringConfigEntry(
+ key_path=["test-ing", "key1"],
+ default="value"
+ )
+ int_entry = IntConfigEntry(
+ key_path=["testing", "key2"],
+ default=2
+ )
+
+ source = EnvSource()
+ original_key = EnvSource.env_key(str_entry)
+ expected = "expected"
+ with mock.patch.dict(os.environ, {original_key: expected}, clear=True):
+ source.load()
+
+ self.assertTrue(source.has(str_entry))
+ self.assertEqual(source.get(str_entry), expected)
+ self.assertFalse(source.has(int_entry))
+
+ normalized_env_key = original_key.replace('-', '_')
+ self.assertNotEqual(original_key, normalized_env_key)
+ with mock.patch.dict(os.environ, {normalized_env_key: expected + '2'}, clear=True):
+ source.load()
+
+ self.assertTrue(source.has(str_entry))
+ self.assertEqual(source.get(str_entry), expected + '2')
+ self.assertFalse(source.has(int_entry))
|
Normalize hyphens in env source
The current [KEY_PATH_REGEX](https://github.com/markusressel/container-app-conf/blob/e4ec0c8c51b5a460744c033bc19ab76b4f74617c/container_app_conf/const.py#L22) allows both hyphens and underscores. As brought up in https://github.com/markusressel/keel-telegram-bot/issues/29, hyphens are not POSIX-compliant.
# Solution
Normalize hyphens to underscores in the env source and check for both the original and normalized version to avoid a breaking change.
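A minimal standalone sketch of that lookup (not the actual `EnvSource` code):

```python
import os

def get_env_value(env_key):
    # Try the original key first (backwards compatible), then the
    # POSIX-safe variant with hyphens normalized to underscores.
    normalized = env_key.replace('-', '_')
    return os.environ.get(env_key, os.environ.get(normalized))
```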
|
0.0
|
e4ec0c8c51b5a460744c033bc19ab76b4f74617c
|
[
"tests/data_source/source_test.py::TestDataSource::test_env"
] |
[
"tests/data_source/source_test.py::TestDataSource::test_json",
"tests/data_source/source_test.py::TestDataSource::test_priority",
"tests/data_source/source_test.py::TestDataSource::test_toml",
"tests/data_source/source_test.py::TestDataSource::test_yaml"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-15 10:34:49+00:00
|
mit
| 3,724 |
|
markusressel__container-app-conf-8
|
diff --git a/README.md b/README.md
index 28db477..07383a2 100644
--- a/README.md
+++ b/README.md
@@ -53,8 +53,8 @@ class AppConfig(Config):
| `StringConfigEntry` | Takes the raw string input | `str` |
| `DateConfigEntry` | Parses various datetime formats (see [python-dateutil](https://github.com/dateutil/dateutil/)) | `datetime` |
| `TimeDeltaConfigEntry` | Parses various timedelta formats (see [pytimeparse](https://github.com/wroberts/pytimeparse)) | `timedelta` |
-| `FileConfigEntry` | Parses a file path | `str` |
-| `DirectoryConfigEntry` | Parses a directory path | `str` |
+| `FileConfigEntry` | Parses a file path | `Path` |
+| `DirectoryConfigEntry` | Parses a directory path | `Path` |
| `ListConfigEntry` | Parses a comma separated string to a list of items specified in another `ConfigEntry` (in yaml it can also be specified as a yaml list) | `[]` |
If none of the existing types suit your needs you can easily create your
diff --git a/container_app_conf/entry/file.py b/container_app_conf/entry/file.py
index 32ffe3e..8feb494 100644
--- a/container_app_conf/entry/file.py
+++ b/container_app_conf/entry/file.py
@@ -18,6 +18,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
+import pathlib
from container_app_conf import ConfigEntry
@@ -41,18 +42,18 @@ class FileConfigEntry(ConfigEntry):
:return: the parsed file value
"""
str_value = str(value)
+ file = pathlib.Path(str_value)
if str_value.endswith(os.sep):
- raise AssertionError("File path should not end with delimiter: {}".format(str_value))
+ raise AssertionError("File path should not end with '{}' delimiter: {}".format(os.sep, str_value))
- if os.path.exists(str_value):
- if not os.path.isfile(str_value):
- IsADirectoryError("Path is not a file: {}".format(str_value))
- else:
- if self.check_existence:
- raise FileNotFoundError("File does not exist: {}".format(value))
+ if file.is_dir():
+ raise IsADirectoryError("Path is not a file: {}".format(str_value))
- return os.path.abspath(str_value)
+ if self.check_existence and not file.exists():
+ raise FileNotFoundError("File does not exist: {}".format(value))
+
+ return file
class DirectoryConfigEntry(ConfigEntry):
@@ -74,12 +75,15 @@ class DirectoryConfigEntry(ConfigEntry):
:return: the parsed folder value
"""
str_value = str(value)
+ directory = pathlib.Path(str_value)
+
+ if not str_value.endswith(os.sep):
+ raise AssertionError("Directory path should end with '{}' delimiter: {}".format(os.sep, str_value))
+
+ if directory.is_file():
+ raise NotADirectoryError("Path is not a directory: {}".format(str_value))
- if os.path.exists(str_value):
- if not os.path.isdir(str_value):
- raise NotADirectoryError("Path is not a directory: {}".format(str_value))
- else:
- if self.check_existence:
- raise FileNotFoundError("directory does not exist: {}".format(value))
+ if self.check_existence and not directory.exists():
+ raise FileNotFoundError("directory does not exist: {}".format(value))
- return os.path.abspath(str_value)
+ return directory
|
markusressel/container-app-conf
|
563f0070757a9b97255287cf13ffcb1ad5af3fdd
|
diff --git a/tests/__init__.py b/tests/__init__.py
index 5dedea8..63fb44f 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -104,8 +104,7 @@ class TestBase(unittest.TestCase):
class EntryTestBase(TestBase):
- @staticmethod
- def assert_input_output(entry: ConfigEntry, list_of_tuples: [()]):
+ def assert_input_output(self, entry: ConfigEntry, list_of_tuples: [()]):
for item in list_of_tuples:
assert len(item) == 2
@@ -119,4 +118,4 @@ class EntryTestBase(TestBase):
except:
assert True
else:
- assert entry._parse_value(input) == result
+ self.assertEquals(entry._parse_value(input), result)
diff --git a/tests/entry_test.py b/tests/entry_test.py
index c7662f3..4059875 100644
--- a/tests/entry_test.py
+++ b/tests/entry_test.py
@@ -18,7 +18,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-import os
+from pathlib import Path
from py_range_parse import Range
@@ -34,8 +34,7 @@ from tests import EntryTestBase
class EntryTest(EntryTestBase):
- @staticmethod
- def test_bool_entry():
+ def test_bool_entry(self):
config_entry = BoolConfigEntry(yaml_path=["bool"])
true_values = ["y", "yes", "true", "t", 1, True]
@@ -51,10 +50,9 @@ class EntryTest(EntryTestBase):
for iv in invalid_values:
input_output.append((iv, ValueError))
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_int_entry():
+ def test_int_entry(self):
config_entry = IntConfigEntry(yaml_path=["int"])
input_output = [
("5", 5),
@@ -63,10 +61,9 @@ class EntryTest(EntryTestBase):
(-3, -3)
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_float_entry():
+ def test_float_entry(self):
config_entry = FloatConfigEntry(yaml_path=["float"])
input_output = [
("5", 5.0),
@@ -78,10 +75,9 @@ class EntryTest(EntryTestBase):
("3%", 0.03)
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_date_entry():
+ def test_date_entry(self):
from datetime import datetime
from dateutil.tz import tzutc
@@ -91,10 +87,9 @@ class EntryTest(EntryTestBase):
("2008-09-03", datetime(2008, 9, 3, 0, 0, 0, 0)),
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_timedelta_entry():
+ def test_timedelta_entry(self):
from datetime import timedelta
config_entry = TimeDeltaConfigEntry(yaml_path=["timedelta"])
@@ -106,18 +101,17 @@ class EntryTest(EntryTestBase):
("4:13", timedelta(hours=0, minutes=4, seconds=13)),
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_file_entry():
+ def test_file_entry(self):
config_entry = FileConfigEntry(yaml_path=["file"])
input_output = [
- ("/tmp/test", "/tmp/test"),
- ("./test", os.path.abspath("./test")),
+ ("/tmp/test", Path("/tmp/test")),
+ ("./test", Path("./test")),
("/something/", AssertionError),
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
config_entry = FileConfigEntry(yaml_path=["file"],
check_existence=True)
@@ -125,33 +119,32 @@ class EntryTest(EntryTestBase):
("/tmp/test", FileNotFoundError),
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_directory_entry():
+ def test_directory_entry(self):
config_entry = DirectoryConfigEntry(yaml_path=["directory"])
input_output = [
- ("/tmp", "/tmp"),
- ("./test", os.path.abspath("./test")),
- ("/something/", "/something"),
+ ("/tmp", AssertionError),
+ ("/tmp/", Path("/tmp")),
+ ("./test/", Path("./test")),
+ ("/something/", Path("/something")),
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
config_entry = DirectoryConfigEntry(yaml_path=["directory"],
check_existence=True)
input_output = [
- ("./", os.path.abspath("./")),
+ ("./", Path("./")),
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_range_entry():
+ def test_range_entry(self):
config_entry = RangeConfigEntry(yaml_path=["range"])
input_output = [
("[-5..5]", Range(-5, 5)),
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
diff --git a/tests/list_entry_test.py b/tests/list_entry_test.py
index 669602e..a6882e5 100644
--- a/tests/list_entry_test.py
+++ b/tests/list_entry_test.py
@@ -17,9 +17,9 @@
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-import os
from datetime import datetime
from datetime import timedelta
+from pathlib import Path
from dateutil.tz import tzutc
@@ -34,8 +34,7 @@ from tests import EntryTestBase
class ListEntryTest(EntryTestBase):
- @staticmethod
- def test_str_list_entry_custom_delimiter():
+ def test_str_list_entry_custom_delimiter(self):
config_entry = ListConfigEntry(item_type=IntConfigEntry,
yaml_path=["int_list"],
delimiter="::")
@@ -43,10 +42,9 @@ class ListEntryTest(EntryTestBase):
("1::2::3", [1, 2, 3])
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_int_list_entry():
+ def test_int_list_entry(self):
config_entry = ListConfigEntry(item_type=IntConfigEntry,
yaml_path=["int_list"])
input_output = [
@@ -55,10 +53,9 @@ class ListEntryTest(EntryTestBase):
("1,2,3", [1, 2, 3])
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_float_list_entry():
+ def test_float_list_entry(self):
config_entry = ListConfigEntry(item_type=FloatConfigEntry,
yaml_path=["float_list"])
input_output = [
@@ -68,10 +65,9 @@ class ListEntryTest(EntryTestBase):
("1,2.5,3", [1.0, 2.5, 3.0])
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_date_list_entry():
+ def test_date_list_entry(self):
config_entry = ListConfigEntry(item_type=DateConfigEntry,
yaml_path=["date_list"])
@@ -84,10 +80,9 @@ class ListEntryTest(EntryTestBase):
(",".join(input_example_1), output_example_1)
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_timedelta_entry():
+ def test_timedelta_entry(self):
config_entry = ListConfigEntry(item_type=TimeDeltaConfigEntry,
yaml_path=["timedelta_list"])
input_output = [
@@ -102,10 +97,9 @@ class ListEntryTest(EntryTestBase):
)
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
- @staticmethod
- def test_file_entry():
+ def test_file_entry(self):
config_entry = ListConfigEntry(item_type=FileConfigEntry,
item_args={
"check_existence": False
@@ -117,9 +111,9 @@ class ListEntryTest(EntryTestBase):
input_output = [
(None, None),
("/tmp/", AssertionError),
- (example1, [example1]),
- (example2, [os.path.abspath(example2)]),
- (",".join(example3), [example1, os.path.abspath(example2)]),
+ (example1, [Path(example1)]),
+ (example2, [Path(example2)]),
+ (",".join(example3), [Path(example1), Path(example2)]),
]
- EntryTestBase.assert_input_output(config_entry, input_output)
+ self.assert_input_output(config_entry, input_output)
|
Use of pathlib for File- and DirectoryConfigEntry
Since **container-app-conf** uses an object-oriented approach and [pathlib](https://docs.python.org/3.5/library/pathlib.html) provides objects for paths, it would be the more suitable implementation. This also makes further handling of paths much easier.
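A small illustration of the benefit, using plain pathlib (not library code):

```python
from pathlib import Path

path = Path("/tmp/test")
print(path.parent)     # /tmp
print(path.name)       # test
print(path.is_file())  # existence/type checks replace os.path.* calls
```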
|
0.0
|
563f0070757a9b97255287cf13ffcb1ad5af3fdd
|
[
"tests/entry_test.py::EntryTest::test_directory_entry",
"tests/entry_test.py::EntryTest::test_file_entry",
"tests/list_entry_test.py::ListEntryTest::test_file_entry"
] |
[
"tests/entry_test.py::EntryTest::test_bool_entry",
"tests/entry_test.py::EntryTest::test_date_entry",
"tests/entry_test.py::EntryTest::test_float_entry",
"tests/entry_test.py::EntryTest::test_int_entry",
"tests/entry_test.py::EntryTest::test_range_entry",
"tests/entry_test.py::EntryTest::test_timedelta_entry",
"tests/list_entry_test.py::ListEntryTest::test_date_list_entry",
"tests/list_entry_test.py::ListEntryTest::test_float_list_entry",
"tests/list_entry_test.py::ListEntryTest::test_int_list_entry",
"tests/list_entry_test.py::ListEntryTest::test_str_list_entry_custom_delimiter",
"tests/list_entry_test.py::ListEntryTest::test_timedelta_entry"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-17 02:08:27+00:00
|
mit
| 3,725 |
|
marrink-lab__vermouth-martinize-205
|
diff --git a/vermouth/processors/average_beads.py b/vermouth/processors/average_beads.py
index b126dd7..8c57f2d 100644
--- a/vermouth/processors/average_beads.py
+++ b/vermouth/processors/average_beads.py
@@ -77,7 +77,7 @@ def do_average_bead(molecule, ignore_missing_graphs=False, weight=None):
for node in molecule.nodes.values():
if 'graph' in node:
- positions = np.stack([
+ positions = np.array([
subnode['position']
for subnode in node['graph'].nodes().values()
if subnode.get('position') is not None
@@ -87,7 +87,14 @@ def do_average_bead(molecule, ignore_missing_graphs=False, weight=None):
for subnode_key, subnode in node['graph'].nodes.items()
if subnode.get('position') is not None
])
- node['position'] = np.average(positions, axis=0, weights=weights)
+ try:
+ ndim = positions.shape[1]
+ except IndexError:
+ ndim = 3
+ if abs(sum(weights)) < 1e-7:
+ node['position'] = np.array([np.nan]*ndim, dtype=float)
+ else:
+ node['position'] = np.average(positions, axis=0, weights=weights)
return molecule
|
marrink-lab/vermouth-martinize
|
ca5856e938333dbd85aeb88f25316ba54f1fd84a
|
diff --git a/vermouth/tests/test_average_beads.py b/vermouth/tests/test_average_beads.py
index 4720fea..07fdc12 100644
--- a/vermouth/tests/test_average_beads.py
+++ b/vermouth/tests/test_average_beads.py
@@ -59,6 +59,30 @@ def mol_with_subgraph():
"target False": np.array([2.6, 3.6, 4.6]),
})
+ subgraph = nx.Graph()
+ mol.add_node(2, **{
+ "graph": subgraph,
+ "mapping_weights": {},
+ "target mass": np.array([np.nan, np.nan, np.nan]),
+ "target not mass": np.array([np.nan, np.nan, np.nan]),
+ "target None": np.array([np.nan, np.nan, np.nan]),
+ "target False": np.array([np.nan, np.nan, np.nan]),
+ })
+
+ subgraph = nx.Graph()
+ subgraph.add_nodes_from((
+ (0, {'mass': 1.2, 'not mass': 2.2, 'position': np.array([2, 3, 4], dtype=float),}),
+ (1, {'mass': 1.3, 'not mass': 2.3, 'position': np.array([3, 4, 5], dtype=float),}),
+ ))
+ mol.add_node(3, **{
+ "graph": subgraph,
+ "mapping_weights": {0: 0, 1: 0},
+ "target mass": np.array([np.nan, np.nan, np.nan]),
+ "target not mass": np.array([np.nan, np.nan, np.nan]),
+ "target None": np.array([np.nan, np.nan, np.nan]),
+ "target False": np.array([np.nan, np.nan, np.nan]),
+ })
+
return mol
@pytest.fixture(params=(None, 'mass', 'not mass'))
@@ -89,7 +113,7 @@ def test_do_average_bead(mol_with_subgraph, weight):
target_key = 'target {}'.format(weight)
target_positions = np.stack([node[target_key] for node in mol_with_subgraph.nodes.values()])
positions = np.stack([node['position'] for node in mol_with_subgraph.nodes.values()])
- assert np.allclose(positions, target_positions)
+ assert np.allclose(positions, target_positions, equal_nan=True)
@pytest.mark.parametrize('weight', ('mass', 'not mass'))
@@ -120,7 +144,7 @@ def test_processor_variable(mol_with_variable):
target_key = 'target {}'.format(weight)
target_positions = np.stack([node[target_key] for node in mol_with_variable.nodes.values()])
positions = np.stack([node['position'] for node in mol_with_variable.nodes.values()])
- assert np.allclose(positions, target_positions)
+ assert np.allclose(positions, target_positions, equal_nan=True)
@pytest.mark.parametrize('weight', (False, 'mass', 'not mass'))
@@ -130,4 +154,4 @@ def test_processor_weight(mol_with_variable, weight):
target_key = 'target {}'.format(weight)
target_positions = np.stack([node[target_key] for node in mol_with_variable.nodes.values()])
positions = np.stack([node['position'] for node in mol_with_variable.nodes.values()])
- assert np.allclose(positions, target_positions)
+ assert np.allclose(positions, target_positions, equal_nan=True)
|
DoAverageBead breaks if a node has no constructing atoms
If, for example, an amino acid sidechain is completely missing, there are no atoms constructing the SC1 bead (in Martini). `do_average_bead` does not like this, since `np.stack([])` raises an error.
The fix should be a fairly straightforward if/else statement that sets the position to [nan, nan, nan] if there are no constructing atoms.
Related to #17
Current work-around for users: make sure there is at least 1 atom per bead present in the input.
Found with help from Sebastian.
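A minimal standalone sketch of that fallback, assuming plain lists of positions and weights rather than the processor's node attributes:

```python
import numpy as np

def average_position(positions, weights):
    if not positions:  # no constructing atoms: np.stack([]) would raise
        return np.array([np.nan, np.nan, np.nan])
    if abs(sum(weights)) < 1e-7:  # all-zero weights: np.average would raise
        return np.array([np.nan, np.nan, np.nan])
    return np.average(np.array(positions), axis=0, weights=weights)
```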
|
0.0
|
ca5856e938333dbd85aeb88f25316ba54f1fd84a
|
[
"vermouth/tests/test_average_beads.py::test_do_average_bead[None]",
"vermouth/tests/test_average_beads.py::test_do_average_bead[mass]",
"vermouth/tests/test_average_beads.py::test_do_average_bead[not",
"vermouth/tests/test_average_beads.py::test_processor_variable[None]",
"vermouth/tests/test_average_beads.py::test_processor_variable[mass]",
"vermouth/tests/test_average_beads.py::test_processor_variable[not",
"vermouth/tests/test_average_beads.py::test_processor_weight[None-False]",
"vermouth/tests/test_average_beads.py::test_processor_weight[None-mass]",
"vermouth/tests/test_average_beads.py::test_processor_weight[None-not",
"vermouth/tests/test_average_beads.py::test_processor_weight[mass-False]",
"vermouth/tests/test_average_beads.py::test_processor_weight[mass-mass]",
"vermouth/tests/test_average_beads.py::test_processor_weight[mass-not",
"vermouth/tests/test_average_beads.py::test_processor_weight[not"
] |
[
"vermouth/tests/test_average_beads.py::test_shoot_weight[mass]",
"vermouth/tests/test_average_beads.py::test_shoot_weight[not",
"vermouth/tests/test_average_beads.py::test_shoot_graph"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-07-11 10:08:38+00:00
|
apache-2.0
| 3,726 |
|
marrink-lab__vermouth-martinize-280
|
diff --git a/vermouth/processors/canonicalize_modifications.py b/vermouth/processors/canonicalize_modifications.py
index 2679131..33a9722 100644
--- a/vermouth/processors/canonicalize_modifications.py
+++ b/vermouth/processors/canonicalize_modifications.py
@@ -276,14 +276,18 @@ def fix_ptm(molecule):
try:
identified = identify_ptms(residue, res_ptms, options)
except KeyError:
- LOGGER.exception('Could not identify the modifications for'
- ' residues {}, involving atoms {}',
- ['{resname}{resid}'.format(**molecule.nodes[resid_to_idxs[resid][0]])
- for resid in sorted(set(resids))],
- ['{atomid}-{atomname}'.format(**molecule.nodes[idx])
- for idxs in res_ptms for idx in idxs[0]],
- type='unknown-input')
- raise
+ LOGGER.warning('Could not identify the modifications for'
+ ' residues {}, involving atoms {}',
+ ['{resname}{resid}'.format(**molecule.nodes[resid_to_idxs[resid][0]])
+ for resid in sorted(set(resids))],
+ ['{atomid}-{atomname}'.format(**molecule.nodes[idx])
+ for idxs in res_ptms for idx in idxs[0]],
+ type='unknown-input')
+ for idxs in res_ptms:
+ for idx in idxs[0]:
+ molecule.remove_node(idx)
+ continue
+
# Why this mess? There can be multiple PTMs for a single (set of)
# residue(s); and a single PTM can span multiple residues.
LOGGER.info("Identified the modifications {} on residues {}",
|
marrink-lab/vermouth-martinize
|
074bb12d17cdbbdf40723e7cf4b2fb83406cd983
|
diff --git a/vermouth/tests/test_repair_graph.py b/vermouth/tests/test_repair_graph.py
index e902d91..ece4438 100644
--- a/vermouth/tests/test_repair_graph.py
+++ b/vermouth/tests/test_repair_graph.py
@@ -19,7 +19,6 @@ Test graph reparation and related operations.
import copy
import logging
-import networkx as nx
import pytest
import vermouth
from vermouth.molecule import Link
@@ -268,14 +267,15 @@ def test_renaming(renamed_graph):
[email protected]('resid,mutations,modifications,atomnames',[
[email protected]('resid,mutations,modifications,atomnames', [
(1, ['ALA'], [], 'O C CA HA N HN CB HB1 HB2 HB3'), # The glutamate chain and N-ter are removed
(1, [], ['N-ter'], 'O C CA HA N H HN CB HB1 HB2 CG HG1 HG2 CD OE1 OE2'), # HE1 got removed
(2, ['ALA'], ['N-ter', 'C-ter'], 'O OXT C CA HA N H HN CB HB1 HB2 HB3'),
(2, ['GLU'], [], 'O C CA HA N HN CB HB1 HB2 CG HG1 HG2 CD OE1 OE2'), # Added glutamate sidechain
(5, ['GLY'], ['none'], 'N CA C O HN HA1 HA2'), # Remove O2 from C-ter mod
])
-def test_repair_graph_with_mutation_modification(system_mod, resid, mutations, modifications, atomnames):
+def test_repair_graph_with_mutation_modification(system_mod, resid, mutations,
+ modifications, atomnames):
mol = system_mod.molecules[0]
# Let's mutate res1 to ALA
for node_idx in mol:
@@ -296,11 +296,13 @@ def test_repair_graph_with_mutation_modification(system_mod, resid, mutations, m
assert resid1_atomnames == set(atomnames.split())
[email protected]('resid,mutations,modifications',[
[email protected]('resid,mutations,modifications', [
(2, [], ['GLU-H']), # The glutamate chain and N-ter are removed
(2, ['ALA', 'LEU'], [])
])
-def test_repair_graph_with_mutation_modification_error(system_mod, caplog, resid, mutations, modifications):
+def test_repair_graph_with_mutation_modification_error(system_mod, caplog,
+ resid, mutations,
+ modifications):
mol = system_mod.molecules[0]
# Let's mutate res1 to ALA
for node_idx in mol:
@@ -313,3 +315,54 @@ def test_repair_graph_with_mutation_modification_error(system_mod, caplog, resid
assert not caplog.records
mol = vermouth.RepairGraph().run_molecule(mol)
assert len(caplog.records) == 1
+
+
[email protected]('known_mod_names', [
+ [],
+ ['C-ter'],
+ ['C-ter', 'N-ter'],
+ ['GLU-H', 'N-ter'],
+])
+def test_unknown_mods_removed(caplog, repaired_graph, known_mod_names):
+ """
+ Tests that atoms that are part of modifications, but are not recognized, get
+ removed from the graph by CanonicalizeModifications
+ """
+ caplog.set_level(logging.WARNING)
+ ff = copy.copy(repaired_graph.force_field)
+ for mod_name in known_mod_names:
+ assert mod_name in ff.modifications # Purely defensive
+
+ removed_mods = []
+ for name, mod in dict(ff.modifications).items():
+ if name not in known_mod_names:
+ del ff.modifications[name]
+ removed_mods.append(mod)
+
+ repaired_graph.force_field = ff
+ mol = repaired_graph.molecules[0]
+
+ assert not caplog.records
+ assert len(mol) == 46
+ vermouth.CanonicalizeModifications().run_system(repaired_graph)
+
+ assert caplog.records
+
+ for record in caplog.records:
+ assert record.levelname == 'WARNING'
+
+ assert len(mol) < 46
+ atomnames = dict(mol.nodes(data='atomname')).values()
+ for mod in removed_mods:
+ for node_key in mod.nodes:
+ node = mod.nodes[node_key]
+ if node['PTM_atom']:
+ assert node['atomname'] not in atomnames
+
+ for node_key in mol.nodes:
+ node = mol.nodes[node_key]
+ if node.get('PTM_atom'):
+ contained_by = [mod for mod in ff.modifications.values()
+ if node.get('expected', node['atomname']) in
+ dict(mod.nodes(data='atomname')).values()]
+ assert len(contained_by) == 1
|
Increase usability by removing unrecognized atoms
Currently, if a PTM is not recognized, martinize turns into a mushroom. This is not great. Instead, all atoms that are not recognized should be removed and warning(s) issued. This should be done either by CanonicalizeModifications or by a separate processor. In the second case, CanonicalizeModifications should mark all unknown atoms.
Similarly, the error message when RepairGraph can't find a reference block for a specific residue name should be better (IIRC). It's probably undesirable to remove complete unrecognized residues though.
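A minimal sketch of the desired behaviour (the names mirror the processor code but are illustrative):

```python
import logging

LOGGER = logging.getLogger(__name__)

def drop_unknown_ptm_atoms(molecule, res_ptms):
    # Instead of re-raising, warn and remove the unidentified PTM atoms so
    # that processing can continue with the rest of the molecule.
    LOGGER.warning('Could not identify the modifications; removing the atoms involved')
    for idxs in res_ptms:
        for idx in idxs[0]:
            molecule.remove_node(idx)
```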
|
0.0
|
074bb12d17cdbbdf40723e7cf4b2fb83406cd983
|
[
"vermouth/tests/test_repair_graph.py::test_unknown_mods_removed[True-known_mod_names0]",
"vermouth/tests/test_repair_graph.py::test_unknown_mods_removed[True-known_mod_names1]",
"vermouth/tests/test_repair_graph.py::test_unknown_mods_removed[True-known_mod_names2]",
"vermouth/tests/test_repair_graph.py::test_unknown_mods_removed[True-known_mod_names3]",
"vermouth/tests/test_repair_graph.py::test_unknown_mods_removed[False-known_mod_names0]",
"vermouth/tests/test_repair_graph.py::test_unknown_mods_removed[False-known_mod_names1]",
"vermouth/tests/test_repair_graph.py::test_unknown_mods_removed[False-known_mod_names2]",
"vermouth/tests/test_repair_graph.py::test_unknown_mods_removed[False-known_mod_names3]"
] |
[
"vermouth/tests/test_repair_graph.py::test_PTM_atom_true[True-13]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_true[True-14]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_true[True-36]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_true[False-13]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_true[False-14]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_true[False-36]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-0]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-1]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-2]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-3]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-4]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-5]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-6]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-7]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-8]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-9]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-10]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-11]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-12]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[True-15]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-0]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-1]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-2]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-3]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-4]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-5]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-6]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-7]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-8]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-9]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-10]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-11]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-12]",
"vermouth/tests/test_repair_graph.py::test_PTM_atom_false[False-15]",
"vermouth/tests/test_repair_graph.py::test_uniq_names_repaired[True]",
"vermouth/tests/test_repair_graph.py::test_uniq_names_repaired[False]",
"vermouth/tests/test_repair_graph.py::test_uniq_names_canonicalize[True]",
"vermouth/tests/test_repair_graph.py::test_uniq_names_canonicalize[False]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-0-expected_names0]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-1-expected_names1]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-2-expected_names2]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-3-expected_names3]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-4-expected_names4]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-5-expected_names5]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-6-expected_names6]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-7-expected_names7]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-8-expected_names8]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-9-expected_names9]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-10-expected_names10]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-11-expected_names11]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-12-expected_names12]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-13-expected_names13]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-14-expected_names14]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-15-expected_names15]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-16-expected_names16]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-17-expected_names17]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-18-expected_names18]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-19-expected_names19]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-20-expected_names20]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-21-expected_names21]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-22-expected_names22]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-23-expected_names23]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-24-expected_names24]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-25-expected_names25]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-26-expected_names26]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-27-expected_names27]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-28-expected_names28]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-29-expected_names29]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-30-expected_names30]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-31-expected_names31]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-32-expected_names32]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-33-expected_names33]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-34-expected_names34]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-35-expected_names35]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[True-36-expected_names36]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-0-expected_names0]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-1-expected_names1]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-2-expected_names2]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-3-expected_names3]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-4-expected_names4]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-5-expected_names5]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-6-expected_names6]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-7-expected_names7]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-8-expected_names8]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-9-expected_names9]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-10-expected_names10]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-11-expected_names11]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-12-expected_names12]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-13-expected_names13]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-14-expected_names14]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-15-expected_names15]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-16-expected_names16]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-17-expected_names17]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-18-expected_names18]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-19-expected_names19]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-20-expected_names20]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-21-expected_names21]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-22-expected_names22]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-23-expected_names23]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-24-expected_names24]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-25-expected_names25]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-26-expected_names26]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-27-expected_names27]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-28-expected_names28]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-29-expected_names29]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-30-expected_names30]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-31-expected_names31]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-32-expected_names32]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-33-expected_names33]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-34-expected_names34]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-35-expected_names35]",
"vermouth/tests/test_repair_graph.py::test_name_repaired[False-36-expected_names36]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-0-expected_names0]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-1-expected_names1]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-2-expected_names2]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-3-expected_names3]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-4-expected_names4]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-5-expected_names5]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-6-expected_names6]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-7-expected_names7]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-8-expected_names8]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-9-expected_names9]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-10-expected_names10]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-11-expected_names11]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-12-expected_names12]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-13-expected_names13]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-14-expected_names14]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-15-expected_names15]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-16-expected_names16]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-17-expected_names17]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-18-expected_names18]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-19-expected_names19]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-20-expected_names20]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-21-expected_names21]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-22-expected_names22]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-23-expected_names23]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-24-expected_names24]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-25-expected_names25]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-26-expected_names26]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-27-expected_names27]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-28-expected_names28]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-29-expected_names29]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-30-expected_names30]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-31-expected_names31]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-32-expected_names32]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-33-expected_names33]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-34-expected_names34]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-35-expected_names35]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[True-36-expected_names36]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-0-expected_names0]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-1-expected_names1]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-2-expected_names2]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-3-expected_names3]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-4-expected_names4]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-5-expected_names5]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-6-expected_names6]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-7-expected_names7]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-8-expected_names8]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-9-expected_names9]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-10-expected_names10]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-11-expected_names11]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-12-expected_names12]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-13-expected_names13]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-14-expected_names14]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-15-expected_names15]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-16-expected_names16]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-17-expected_names17]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-18-expected_names18]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-19-expected_names19]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-20-expected_names20]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-21-expected_names21]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-22-expected_names22]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-23-expected_names23]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-24-expected_names24]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-25-expected_names25]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-26-expected_names26]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-27-expected_names27]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-28-expected_names28]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-29-expected_names29]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-30-expected_names30]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-31-expected_names31]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-32-expected_names32]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-33-expected_names33]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-34-expected_names34]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-35-expected_names35]",
"vermouth/tests/test_repair_graph.py::test_name_canonicalized[False-36-expected_names36]",
"vermouth/tests/test_repair_graph.py::test_common_attributes[True-2-expected_attrs0]",
"vermouth/tests/test_repair_graph.py::test_common_attributes[False-2-expected_attrs0]",
"vermouth/tests/test_repair_graph.py::test_renaming[True]",
"vermouth/tests/test_repair_graph.py::test_renaming[False]",
"vermouth/tests/test_repair_graph.py::test_repair_graph_with_mutation_modification[1-mutations0-modifications0-O",
"vermouth/tests/test_repair_graph.py::test_repair_graph_with_mutation_modification[1-mutations1-modifications1-O",
"vermouth/tests/test_repair_graph.py::test_repair_graph_with_mutation_modification[2-mutations2-modifications2-O",
"vermouth/tests/test_repair_graph.py::test_repair_graph_with_mutation_modification[2-mutations3-modifications3-O",
"vermouth/tests/test_repair_graph.py::test_repair_graph_with_mutation_modification[5-mutations4-modifications4-N",
"vermouth/tests/test_repair_graph.py::test_repair_graph_with_mutation_modification_error[2-mutations0-modifications0]",
"vermouth/tests/test_repair_graph.py::test_repair_graph_with_mutation_modification_error[2-mutations1-modifications1]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-06-26 14:37:15+00:00
|
apache-2.0
| 3,727 |
|
marrink-lab__vermouth-martinize-286
|
diff --git a/bin/martinize2 b/bin/martinize2
index 98964c7..9286793 100755
--- a/bin/martinize2
+++ b/bin/martinize2
@@ -307,7 +307,7 @@ def entry():
"If 'none', only bonds from the input file (CONECT)"
" will be used.")
file_group.add_argument('-bonds-fudge', dest='bonds_fudge', type=float,
- default=1.0, help='Factor with which Van der Waals'
+ default=1.2, help='Factor with which Van der Waals'
' radii should be scaled when determining bonds '
'based on distances.')
diff --git a/vermouth/processors/make_bonds.py b/vermouth/processors/make_bonds.py
index 0c68c6c..e78997a 100644
--- a/vermouth/processors/make_bonds.py
+++ b/vermouth/processors/make_bonds.py
@@ -60,19 +60,28 @@ VDW_RADII = { # in nm
#VALENCES = {'H': 1, 'C': 4, 'N': 3, 'O': 2, 'S': 6}
-def _bonds_from_distance(graph, nodes=None, non_edges=None, fudge=1.0):
+def _bonds_from_distance(graph, nodes=None, non_edges=None, fudge=1.2):
"""Add edges to `graph` between `nodes` based on distance.
Adds edges to `graph` between nodes in `nodes`, but will never add an edge
- that is in `non_edges`. Edges are added based on a simple distance
- criterion. The criterion can be adjusted using `fudge`. Nodes need to have
- an element attribute that is in VDW_RADII in order to be eligible.
+ that is in `non_edges`, nor between H atoms. It will also not create edges
+ where H atoms bridge separate residues. Residues are defined by the
+ '_res_serial' attribute of nodes.
+ Edges are added based on a simple distance criterion. The criterion can be
+ adjusted using `fudge`. Nodes need to have an element attribute that is in
+ VDW_RADII in order to be eligible.
Parameters
----------
graph: networkx.Graph
+ Nodes in the graph must have the attributes 'element', 'position', and
+ '_res_serial'.
nodes: collections.abc.Collection[collections.abc.Hashable]
+ The nodes that should be considered for making edges. Must be in
+ `graph`.
non_edges: collections.abc.Container[frozenset[collections.abc.Hashable, collections.abc.Hashable]]
+ A container of pairs of node keys between which no edge should be added,
+ even when they are close enough.
fudge: float
"""
if not nodes:
@@ -128,6 +137,14 @@ def _bonds_from_distance(graph, nodes=None, non_edges=None, fudge=1.0):
atom2 = nodes[node_idx2]
element1 = atom1['element']
element2 = atom2['element']
+ resserial1 = atom1['_res_serial']
+ resserial2 = atom2['_res_serial']
+
+ # Forbid H-H bonds, and in addition, prevent hydrogens from making bonds
+ # to different residues.
+ if element1 == 'H' and element2 == 'H' or \
+ (resserial1 != resserial2 and (element1 == 'H' or element2 == 'H')):
+ continue
bond_distance = 0.5 * (VDW_RADII[element1] + VDW_RADII[element2])
if dist <= bond_distance * fudge and not graph.has_edge(node_idx1, node_idx2):
@@ -203,7 +220,7 @@ def _bonds_from_names(graph, resname, nodes, force_field):
return non_edges
-def make_bonds(system, allow_name=True, allow_dist=True, fudge=1.0):
+def make_bonds(system, allow_name=True, allow_dist=True, fudge=1.2):
"""Creates bonds within molecules in the system.
First, edges will be created based on residue and atom names. Second, edges
@@ -248,9 +265,12 @@ def make_bonds(system, allow_name=True, allow_dist=True, fudge=1.0):
non_edges = set()
residue_groups = collect_residues(system, ('mol_idx chain resid resname insertion_code'.split()))
- for ((mol_idx, chain, resid, resname, insertion_code), idxs) in residue_groups.items():
+ for res_serial, (keys, idxs) in enumerate(residue_groups.items()):
+ mol_idx, chain, resid, resname, insertion_code = keys
+ for idx in idxs:
+ system.nodes[idx]['_res_serial'] = res_serial
if not allow_name:
- break
+ continue
try:
# Try adding bonds within the residue based on atom names
non_edges.update(_bonds_from_names(system, resname, idxs, force_field))
@@ -282,7 +302,7 @@ def make_bonds(system, allow_name=True, allow_dist=True, fudge=1.0):
class MakeBonds(Processor):
- def __init__(self, allow_name=True, allow_dist=True, fudge=1):
+ def __init__(self, allow_name=True, allow_dist=True, fudge=1.2):
self.allow_name = allow_name
self.allow_dist = allow_dist
self.fudge = fudge
|
marrink-lab/vermouth-martinize
|
ff99729af681bc384f67b683b56c197ed52ffa97
|
diff --git a/vermouth/tests/test_make_bonds.py b/vermouth/tests/test_make_bonds.py
index 78ce2ec..8da4485 100644
--- a/vermouth/tests/test_make_bonds.py
+++ b/vermouth/tests/test_make_bonds.py
@@ -37,17 +37,41 @@ from vermouth.processors import MakeBonds
[{}, ],
],
[
- # Single molecule with two nodes that should be connected
+ # Single molecule with two nodes that should be connected, except
+ # they're both hydrogens
[[{'element': 'H', 'position': [0, 0, 0]},
{'element': 'H', 'position': [0, 0, 0.12]}], ],
[[], ],
- [{(0, 1): {'distance': 0.12}}, ],
+ [{}],
],
[
- # Two molecule with one node each that should be connected
+ # Two molecule with one node each that should be connected, except
+ # they're both hydrogens
[[{'element': 'H', 'position': [0, 0, 0]}, ],
[{'element': 'H', 'position': [0, 0, 0.12]}], ],
[[], []],
+ [{}, {}],
+ ],
+[
+ # Single molecule with two nodes that should be connected
+ [[{'element': 'C', 'position': [0, 0, 0]},
+ {'element': 'H', 'position': [0, 0, 0.12]}], ],
+ [[], ],
+ [{(0, 1): {'distance': 0.12}}, ],
+ ],
+ [
+ # Two molecule with one node each that should be connected, except that
+ # one of the atoms is a H.
+ [[{'element': 'C', 'position': [0, 0, 0]}, ],
+ [{'element': 'H', 'position': [0, 0, 0.12]}], ],
+ [[], []],
+ [{}, {}],
+ ],
+ [
+ # Two molecule with one node each that should be connected
+ [[{'element': 'C', 'position': [0, 0, 0]}, ],
+ [{'element': 'C', 'position': [0, 0, 0.12]}], ],
+ [[], []],
[{(0, 1): {'distance': 0.12}}],
],
[
@@ -149,6 +173,7 @@ from vermouth.processors import MakeBonds
], ],
[[(0, 1, {}), (2, 3, {})], ],
[{(0, 1): {},
+ (1, 2): {'distance': 0.17},
(2, 3): {}}],
],
[
@@ -162,6 +187,7 @@ from vermouth.processors import MakeBonds
], ],
[[(0, 1, {}), (2, 3, {})], ],
[{(0, 1): {},
+ (1, 2): {'distance': 0.17},
(2, 3): {}}],
],
))
|
-cys auto is not working
My current tests with vermouth 0.3.2.dev122 and martini22 models showed that -cys auto is not working. The extra bond for the cysteine bridge was not added in the example I tested. In martinize1 it works well.
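The patch above raises the default `fudge` factor from 1.0 to 1.2. As a minimal sketch of the distance criterion being tuned (assuming illustrative Van der Waals radii in nm; the exact `VDW_RADII` table vermouth ships may differ), consider:
```python
import math

# Illustrative VdW radii in nm; vermouth defines its own VDW_RADII table.
VDW_RADII = {'H': 0.12, 'C': 0.17, 'N': 0.155, 'O': 0.152, 'S': 0.18}

def would_bond(pos1, pos2, element1, element2, fudge=1.2):
    """Distance criterion as in `_bonds_from_distance`: two atoms are bonded
    when their distance is at most `fudge` times the mean of their radii."""
    dist = math.dist(pos1, pos2)
    bond_distance = 0.5 * (VDW_RADII[element1] + VDW_RADII[element2])
    return dist <= bond_distance * fudge

# A disulfide S-S distance is roughly 0.205 nm. With these radii the cutoff
# is 0.18 nm at fudge=1.0 (bridge missed) but 0.216 nm at fudge=1.2 (found),
# consistent with `-cys auto` missing the cysteine bridge before the fix.
print(would_bond((0, 0, 0), (0, 0, 0.205), 'S', 'S', fudge=1.0))  # False
print(would_bond((0, 0, 0), (0, 0, 0.205), 'S', 'S', fudge=1.2))  # True
```
The patch additionally forbids H-H bonds and hydrogen bonds that bridge residues; the sketch omits those checks for brevity.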
|
0.0
|
ff99729af681bc384f67b683b56c197ed52ffa97
|
[
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes2-edges2-expected_edges2]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes3-edges3-expected_edges3]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes5-edges5-expected_edges5]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes16-edges16-expected_edges16]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes17-edges17-expected_edges17]"
] |
[
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes0-edges0-expected_edges0]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes1-edges1-expected_edges1]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes4-edges4-expected_edges4]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes6-edges6-expected_edges6]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes7-edges7-expected_edges7]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes8-edges8-expected_edges8]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes9-edges9-expected_edges9]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes10-edges10-expected_edges10]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes11-edges11-expected_edges11]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes12-edges12-expected_edges12]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes13-edges13-expected_edges13]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes14-edges14-expected_edges14]",
"vermouth/tests/test_make_bonds.py::test_make_bonds[nodes15-edges15-expected_edges15]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-29 08:46:38+00:00
|
apache-2.0
| 3,728 |
|
marrink-lab__vermouth-martinize-572
|
diff --git a/vermouth/dssp/dssp.py b/vermouth/dssp/dssp.py
index 79f89ed..b9c0998 100644
--- a/vermouth/dssp/dssp.py
+++ b/vermouth/dssp/dssp.py
@@ -417,8 +417,15 @@ def convert_dssp_to_martini(sequence):
cg_sequence = ''.join(ss_cg[secstruct] for secstruct in sequence)
wildcard_sequence = ''.join('H' if secstruct == 'H' else '.'
for secstruct in cg_sequence)
+ # Flank the sequence with dots. Otherwise a sequence consisting of only
+ # H will not have a start or end. See also issue 566.
+ # This should not cause further issues, since '..' doesn't map to anything
+ wildcard_sequence = '.' + wildcard_sequence + '.'
for pattern, replacement in patterns.items():
- wildcard_sequence = wildcard_sequence.replace(pattern, replacement)
+ while pattern in wildcard_sequence: # EXPENSIVE! :'(
+ wildcard_sequence = wildcard_sequence.replace(pattern, replacement)
+ # And remove the flanking dots again
+ wildcard_sequence = wildcard_sequence[1:-1]
result = ''.join(
wildcard if wildcard != '.' else cg
for wildcard, cg in zip(wildcard_sequence, cg_sequence)
|
marrink-lab/vermouth-martinize
|
cb8a9f68771e358bf1562fffa61304456347ef0d
|
diff --git a/vermouth/tests/test_dssp.py b/vermouth/tests/test_dssp.py
index 89c52fe..28474ff 100644
--- a/vermouth/tests/test_dssp.py
+++ b/vermouth/tests/test_dssp.py
@@ -680,3 +680,18 @@ def test_cterm_atomnames():
vermouth.processors.CanonicalizeModifications().run_system(system)
dssp_out = dssp.run_dssp(system, executable=DSSP_EXECUTABLE)
assert dssp_out == list("CC")
+
+
[email protected]('sequence, expected', [
+ ('H', '3'),
+ ('HH', '33'),
+ ('CHH', 'C33'),
+ ('HHHHHH', '113322'),
+ ('EHHHHHHC', 'E113322C'),
+ ('HHHHHHHHH', '1111H2222'),
+ ('CHHHHHHHHHC', 'C1111H2222C'),
+ ('CHHHHEHHHHC', 'C3333E3333C'),
+])
+def test_convert_dssp_to_martini(sequence, expected):
+ found = dssp.convert_dssp_to_martini(sequence)
+ assert expected == found
|
Martini 2.2 topologies for alpha-helical peptides are still incorrect
**Context**
This issue is related to Issue #560. Thank you for addressing the main problems mentioned in that issue. However, not all of the problems raised there have been fixed.
**Short description**
While bead types for charged termini backbone beads of alpha-helical peptides in Martini 2.2 have been fixed (thank you for that!), the `vermouth-martinize` script treats the backbone beads _near_ the termini incorrectly.
**Detailed description**
The current dev version of `vermouth-martinize` (version 0.9.7_dev178) does not assign correct bead types to backbone beads of amino acids that are _near_ the peptide termini.
Using the command `martinize2 -f l20.pdb -o topol.top -x l20_martini.pdb -ss HHHHHHHHHHHHHHHHHHHH -p backbone -ff martini22`, `martinize2` generates the following `[ atoms ]` block:
```
[ atoms ]
1 Qd 1 LEU BB 1 1
2 AC1 1 LEU SC1 2 0.0
3 N0 2 LEU BB 3 0.0
4 AC1 2 LEU SC1 4 0.0
5 N0 3 LEU BB 5 0.0
6 AC1 3 LEU SC1 6 0.0
7 N0 4 LEU BB 7 0.0
8 AC1 4 LEU SC1 8 0.0
9 N0 5 LEU BB 9 0.0
10 AC1 5 LEU SC1 10 0.0
11 N0 6 LEU BB 11 0.0
12 AC1 6 LEU SC1 12 0.0
13 N0 7 LEU BB 13 0.0
14 AC1 7 LEU SC1 14 0.0
15 N0 8 LEU BB 15 0.0
16 AC1 8 LEU SC1 16 0.0
17 N0 9 LEU BB 17 0.0
18 AC1 9 LEU SC1 18 0.0
19 N0 10 LEU BB 19 0.0
20 AC1 10 LEU SC1 20 0.0
21 N0 11 LEU BB 21 0.0
22 AC1 11 LEU SC1 22 0.0
23 N0 12 LEU BB 23 0.0
24 AC1 12 LEU SC1 24 0.0
25 N0 13 LEU BB 25 0.0
26 AC1 13 LEU SC1 26 0.0
27 N0 14 LEU BB 27 0.0
28 AC1 14 LEU SC1 28 0.0
29 N0 15 LEU BB 29 0.0
30 AC1 15 LEU SC1 30 0.0
31 N0 16 LEU BB 31 0.0
32 AC1 16 LEU SC1 32 0.0
33 N0 17 LEU BB 33 0.0
34 AC1 17 LEU SC1 34 0.0
35 N0 18 LEU BB 35 0.0
36 AC1 18 LEU SC1 36 0.0
37 N0 19 LEU BB 37 0.0
38 AC1 19 LEU SC1 38 0.0
39 Qa 20 LEU BB 39 -1
40 AC1 20 LEU SC1 40 0.0
```
Meanwhile, using the `martinize.py` script version 2.6_3 (`python martinize.py -f l20.pdb -o topol.top -x l20_martini.pdb -ss HHHHHHHHHHHHHHHHHHHH -p backbone`) generates this atom block:
```
[ atoms ]
1 Qd 1 LEU BB 1 1.0000 ; 1
2 AC1 1 LEU SC1 2 0.0000 ; 1
3 Nd 2 LEU BB 3 0.0000 ; 1
4 AC1 2 LEU SC1 4 0.0000 ; 1
5 Nd 3 LEU BB 5 0.0000 ; 1
6 AC1 3 LEU SC1 6 0.0000 ; 1
7 Nd 4 LEU BB 7 0.0000 ; 1
8 AC1 4 LEU SC1 8 0.0000 ; 1
9 N0 5 LEU BB 9 0.0000 ; H
10 AC1 5 LEU SC1 10 0.0000 ; H
11 N0 6 LEU BB 11 0.0000 ; H
12 AC1 6 LEU SC1 12 0.0000 ; H
13 N0 7 LEU BB 13 0.0000 ; H
14 AC1 7 LEU SC1 14 0.0000 ; H
15 N0 8 LEU BB 15 0.0000 ; H
16 AC1 8 LEU SC1 16 0.0000 ; H
17 N0 9 LEU BB 17 0.0000 ; H
18 AC1 9 LEU SC1 18 0.0000 ; H
19 N0 10 LEU BB 19 0.0000 ; H
20 AC1 10 LEU SC1 20 0.0000 ; H
21 N0 11 LEU BB 21 0.0000 ; H
22 AC1 11 LEU SC1 22 0.0000 ; H
23 N0 12 LEU BB 23 0.0000 ; H
24 AC1 12 LEU SC1 24 0.0000 ; H
25 N0 13 LEU BB 25 0.0000 ; H
26 AC1 13 LEU SC1 26 0.0000 ; H
27 N0 14 LEU BB 27 0.0000 ; H
28 AC1 14 LEU SC1 28 0.0000 ; H
29 N0 15 LEU BB 29 0.0000 ; H
30 AC1 15 LEU SC1 30 0.0000 ; H
31 N0 16 LEU BB 31 0.0000 ; H
32 AC1 16 LEU SC1 32 0.0000 ; H
33 Na 17 LEU BB 33 0.0000 ; 2
34 AC1 17 LEU SC1 34 0.0000 ; 2
35 Na 18 LEU BB 35 0.0000 ; 2
36 AC1 18 LEU SC1 36 0.0000 ; 2
37 Na 19 LEU BB 37 0.0000 ; 2
38 AC1 19 LEU SC1 38 0.0000 ; 2
39 Qa 20 LEU BB 39 -1.0000 ; 2
40 AC1 20 LEU SC1 40 0.0000 ; 2
```
While `martinize2` now generates correct bead types for the backbone beads of the N-terminus and the C-terminus (Qd and Qa, respectively), it does not properly modify the backbone beads of the amino acids near the termini (4 amino acids from the N/C-terminus, **including the first/last residue, if it is uncharged**). The backbone beads of leucines near the N-terminus should be of type Nd, while backbone beads of leucines near the C-terminus should be of type Na. Currently, `martinize2` assigns all leucine backbone beads in helices the type N0.
Refer to article https://doi.org/10.1021/ct700324x, specifically Table 2 for further details.
`martinize.py` achieves the correct behavior by defining three additional alpha-helical "structures" denoted as 1, 2, and 3 and replacing the helical elements at the start and end of the helix with them. Excerpt from the code of the `martinize.py` script:
```
# Helix start and end regions are special and require assignment of
# specific types. The following pattern substitutions are applied
# (in the given order). A dot matches any other type.
# Patterns can be added to the dictionaries. This only makes sense
# if for each key in patterns there is a matching key in pattypes.
patterns = {
"H": pat(".H. .HH. .HHH. .HHHH. .HHHHH. .HHHHHH. .HHHHHHH. .HHHH HHHH.") #@#
}
pattypes = {
"H": pat(".3. .33. .333. .3333. .13332. .113322. .1113222. .1111 2222.") #@#
}
```
Note that structure `3` is used for overlapping segments (segments that are close to both the N-terminus and the C-terminus, i.e. in short helices). The `Nda` bead type is used for the backbone beads in structure 3.
Also note that Gly, Pro, and Ala should be assigned yet different bead types in these terminal segments (as shown in Table 2 of the article linked above).
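The test expectations in the test_patch above pin down the desired substitutions. The following self-contained sketch reproduces the flanked, repeated wildcard replacement from the patch; the `PATTERNS` table is my reading of the martinize.py excerpt (splitting `.HHHH HHHH.` into a start pattern and an end pattern is an interpretation), and the sketch skips the initial DSSP-to-Martini mapping, falling back to the input letter wherever no pattern applies:
```python
# Partial pattern table, in the spirit of martinize.py's patterns/pattypes.
PATTERNS = {
    '.H.': '.3.',
    '.HH.': '.33.',
    '.HHH.': '.333.',
    '.HHHH.': '.3333.',
    '.HHHHH.': '.13332.',
    '.HHHHHH.': '.113322.',
    '.HHHHHHH.': '.1113222.',
    '.HHHH': '.1111',  # start of a longer helix
    'HHHH.': '2222.',  # end of a longer helix
}

def classify_helix(sequence):
    # Reduce to a wildcard string: 'H' stays, everything else becomes '.'.
    wildcard = ''.join('H' if ss == 'H' else '.' for ss in sequence)
    # Flank with dots so an all-H sequence still has a start and an end.
    wildcard = '.' + wildcard + '.'
    for pattern, replacement in PATTERNS.items():
        # Repeat until exhausted: a single str.replace pass misses
        # occurrences that share a boundary dot with an earlier match.
        while pattern in wildcard:
            wildcard = wildcard.replace(pattern, replacement)
    # Drop the flanking dots; keep the input letter where no pattern hit.
    wildcard = wildcard[1:-1]
    return ''.join(w if w != '.' else ss for w, ss in zip(wildcard, sequence))

assert classify_helix('HHHHHH') == '113322'
assert classify_helix('CHHHHHHHHHC') == 'C1111H2222C'
assert classify_helix('CHHHHEHHHHC') == 'C3333E3333C'
```
Because every replacement has the same length as its pattern, the final string stays aligned with the input sequence, which is what makes the closing `zip` safe.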
|
0.0
|
cb8a9f68771e358bf1562fffa61304456347ef0d
|
[
"vermouth/tests/test_dssp.py::test_convert_dssp_to_martini[H-3]",
"vermouth/tests/test_dssp.py::test_convert_dssp_to_martini[HH-33]",
"vermouth/tests/test_dssp.py::test_convert_dssp_to_martini[CHH-C33]",
"vermouth/tests/test_dssp.py::test_convert_dssp_to_martini[HHHHHH-113322]",
"vermouth/tests/test_dssp.py::test_convert_dssp_to_martini[HHHHHHHHH-1111H2222]",
"vermouth/tests/test_dssp.py::test_convert_dssp_to_martini[CHHHHEHHHHC-C3333E3333C]"
] |
[
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_build_molecule[0]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_build_molecule[1]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_build_molecule[3]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_build_molecule[10]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_single_molecule[ABCDE]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_single_molecule[sequence1]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_single_molecule[sequence2]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_multi_molecules_diff_sizes[ABCDEFGHIJKLMNO]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_multi_molecules_diff_sizes[sequence1]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_multi_molecules_diff_sizes[sequence2]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_multi_molecules_cycle[ABCD]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_multi_molecules_cycle[sequence1]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_multi_molecules_cycle[sequence2]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_single_molecules_cycle_one",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_multi_molecules_cycle_one",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length[ABC]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length[ABCD]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length[ABCDEFGHIFKLMNOPQRSTU]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length[]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length_with_filter[ABC]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length_with_filter[ABCD]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length_with_filter[ABCDEFGHIFKLMNOPQRSTU]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length_with_filter[]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_wrong_length_with_filter[ABCDEFGHIJKLMNO]",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_empty_system_empty_sequence",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_empty_system_error",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_empty_with_filter",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_run_molecule",
"vermouth/tests/test_dssp.py::TestAnnotateResidues::test_run_molecule_not_selected",
"vermouth/tests/test_dssp.py::test_read_dssp2[/root/data/temp_dir/tmpp4fn1do3/marrink-lab__vermouth-martinize__0.0/vermouth/tests/data/dssp_tests/dssp_1bta.ssd-CEEEEETTTCCSHHHHHHHHHHHHTCCTTCCCSHHHHHHHHTTTSCSSEEEEEESTTHHHHTTTSSHHHHHHHHHHHHHTTCCEEEEEC]",
"vermouth/tests/test_dssp.py::test_read_dssp2[/root/data/temp_dir/tmpp4fn1do3/marrink-lab__vermouth-martinize__0.0/vermouth/tests/data/dssp_tests/mini-protein1_betasheet.pdb.v2.2.1-3b2-deb_cv1.ssd-CEEEEEETTEEEEEECCCCCCTTCEEEEC]",
"vermouth/tests/test_dssp.py::test_read_dssp2[/root/data/temp_dir/tmpp4fn1do3/marrink-lab__vermouth-martinize__0.0/vermouth/tests/data/dssp_tests/mini-protein1_betasheet.pdb.v3.0.0-3b1-deb_cv1.ssd-CEEEEEETTEEEEEECCCCCCTTCEEEEC]",
"vermouth/tests/test_dssp.py::test_read_dssp2[/root/data/temp_dir/tmpp4fn1do3/marrink-lab__vermouth-martinize__0.0/vermouth/tests/data/dssp_tests/mini-protein2_helix.pdb.v2.2.1-3b2-deb_cv1.ssd-CCSHHHHHHHHHHCCCCHHHHHHHHHHHTSCHHHHHHHTCCC]",
"vermouth/tests/test_dssp.py::test_read_dssp2[/root/data/temp_dir/tmpp4fn1do3/marrink-lab__vermouth-martinize__0.0/vermouth/tests/data/dssp_tests/mini-protein2_helix.pdb.v3.0.0-3b1-deb_cv1.ssd-CCSHHHHHHHHHHCCCCHHHHHHHHHHHTSCHHHHHHHTCCC]",
"vermouth/tests/test_dssp.py::test_read_dssp2[/root/data/temp_dir/tmpp4fn1do3/marrink-lab__vermouth-martinize__0.0/vermouth/tests/data/dssp_tests/mini-protein3_trp-cage.pdb.v2.2.1-3b2-deb_cv1.ssd-CHHHHHHHTTGGGGTCCCCC]",
"vermouth/tests/test_dssp.py::test_read_dssp2[/root/data/temp_dir/tmpp4fn1do3/marrink-lab__vermouth-martinize__0.0/vermouth/tests/data/dssp_tests/mini-protein3_trp-cage.pdb.v3.0.0-3b1-deb_cv1.ssd-CHHHHHHHTTGGGGTCCCCC]",
"vermouth/tests/test_dssp.py::test_convert_dssp_to_martini[EHHHHHHC-E113322C]",
"vermouth/tests/test_dssp.py::test_convert_dssp_to_martini[CHHHHHHHHHC-C1111H2222C]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-31 10:19:53+00:00
|
apache-2.0
| 3,729 |
|
marshmallow-code__apispec-361
|
diff --git a/apispec/ext/marshmallow/openapi.py b/apispec/ext/marshmallow/openapi.py
index b18a21a..a254079 100644
--- a/apispec/ext/marshmallow/openapi.py
+++ b/apispec/ext/marshmallow/openapi.py
@@ -447,25 +447,22 @@ class OpenAPIConverter(object):
:param schema: schema to add to the spec
"""
-
- schema_cls = self.resolve_schema_class(schema)
- name = self.schema_name_resolver(schema_cls)
-
- if not name:
- try:
- return self.schema2jsonschema(schema)
- except RuntimeError:
- raise APISpecError(
- 'Name resolver returned None for schema {schema} which is '
- 'part of a chain of circular referencing schemas. Please'
- ' ensure that the schema_name_resolver passed to'
- ' MarshmallowPlugin returns a string for all circular'
- ' referencing schemas.'.format(schema=schema),
- )
-
schema_instance = resolve_schema_instance(schema)
schema_key = make_schema_key(schema_instance)
if schema_key not in self.refs:
+ schema_cls = self.resolve_schema_class(schema)
+ name = self.schema_name_resolver(schema_cls)
+ if not name:
+ try:
+ return self.schema2jsonschema(schema)
+ except RuntimeError:
+ raise APISpecError(
+ 'Name resolver returned None for schema {schema} which is '
+ 'part of a chain of circular referencing schemas. Please'
+ ' ensure that the schema_name_resolver passed to'
+ ' MarshmallowPlugin returns a string for all circular'
+ ' referencing schemas.'.format(schema=schema),
+ )
name = get_unique_schema_name(self.spec.components, name)
self.spec.components.schema(
name,
|
marshmallow-code/apispec
|
599e5aed3100c394f0c9ac193d668576be2edf63
|
diff --git a/tests/test_ext_marshmallow.py b/tests/test_ext_marshmallow.py
index 72271ad..4437a29 100644
--- a/tests/test_ext_marshmallow.py
+++ b/tests/test_ext_marshmallow.py
@@ -425,6 +425,66 @@ class TestOperationHelper:
spec_fixture.spec,
) + 'Pet'
+ def test_schema_uses_ref_if_available_name_resolver_returns_none_v2(self):
+ def resolver(schema):
+ return None
+ spec = APISpec(
+ title='Test auto-reference',
+ version='0.1',
+ openapi_version='2.0',
+ plugins=(
+ MarshmallowPlugin(schema_name_resolver=resolver,),
+ ),
+ )
+ spec.components.schema('Pet', schema=PetSchema)
+ spec.path(
+ path='/pet',
+ operations={
+ 'get': {
+ 'responses': {
+ 200: {
+ 'schema': PetSchema,
+ },
+ },
+ },
+ },
+ )
+ get = get_paths(spec)['/pet']['get']
+ assert get['responses'][200]['schema']['$ref'] == ref_path(spec) + 'Pet'
+
+ def test_schema_uses_ref_if_available_name_resolver_returns_none_v3(self):
+ def resolver(schema):
+ return None
+ spec = APISpec(
+ title='Test auto-reference',
+ version='0.1',
+ openapi_version='3.0.0',
+ plugins=(
+ MarshmallowPlugin(schema_name_resolver=resolver,),
+ ),
+ )
+ spec.components.schema('Pet', schema=PetSchema)
+ spec.path(
+ path='/pet',
+ operations={
+ 'get': {
+ 'responses': {
+ 200: {
+ 'content': {
+ 'application/json': {
+ 'schema': PetSchema,
+ },
+ },
+ },
+ },
+ },
+ },
+ )
+ get = get_paths(spec)['/pet']['get']
+ assert get['responses'][200]['content']['application/json']['schema']['$ref'] == ref_path(
+ spec,
+ ) + 'Pet'
+
@pytest.mark.parametrize('spec_fixture', ('2.0', ), indirect=True)
def test_schema_uses_ref_in_parameters_and_request_body_if_available_v2(self, spec_fixture):
spec_fixture.spec.components.schema('Pet', schema=PetSchema)
diff --git a/tests/test_openapi.py b/tests/test_openapi.py
index df990b3..a8b83b5 100644
--- a/tests/test_openapi.py
+++ b/tests/test_openapi.py
@@ -648,6 +648,20 @@ class TestNesting:
assert props['category']['items'] == {'$ref': ref_path(spec_fixture.spec) + 'Category'}
+ @pytest.mark.parametrize('modifier', ('only', 'exclude'))
+ def test_schema2jsonschema_with_nested_fields_only_exclude(self, spec_fixture, modifier):
+ class Child(Schema):
+ i = fields.Int()
+ j = fields.Int()
+
+ class Parent(Schema):
+ child = fields.Nested(Child, **{modifier: ('i', )})
+
+ spec_fixture.openapi.schema2jsonschema(Parent)
+ props = get_definitions(spec_fixture.spec)['Child']['properties']
+ assert ('i' in props) == (modifier == 'only')
+ assert ('j' not in props) == (modifier == 'only')
+
def test_schema2jsonschema_with_nested_fields_with_adhoc_changes(self, spec_fixture):
category_schema = CategorySchema(many=True)
category_schema.fields['id'].required = True
|
Should marshmallow helpers respect `only`, `exclude`?
When introspecting `Nested` fields, we don't check for the `only` or `exclude` attributes. For example:
``` python
class FooSchema(Schema):
bar = fields.Str()
baz = fields.Str()
class BobSchema(Schema):
foo = fields.Nested(FooSchema, only=('bar', ))
```
If we call `schema2jsonschema(BobSchema)`, the nested `foo` will include both `bar` and `baz` fields, even though `baz` will never be included in this case. That isn't necessarily a problem, unless `baz` is required:
``` python
class FooSchema(Schema):
bar = fields.Str()
baz = fields.Str(required=True)
```
In this case, users of apispec will likely return JSON that wouldn't validate against the definition schema, since a required field will be missing. I haven't actually encountered this situation, and I don't know how often it will come up--just wanted to raise it for discussion.
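One possible way for the introspection helpers to respect these modifiers (a sketch, not apispec's actual implementation): marshmallow's `Nested.schema` property instantiates the nested schema with `only`/`exclude` already applied, so resolving fields through the field instance rather than the raw schema class yields only the fields that can actually appear in the output:
```python
from marshmallow import Schema, fields

class FooSchema(Schema):
    bar = fields.Str()
    baz = fields.Str(required=True)

class BobSchema(Schema):
    foo = fields.Nested(FooSchema, only=('bar',))

nested_field = BobSchema().fields['foo']
# `nested_field.schema` is a FooSchema instance restricted to 'bar', so an
# introspector iterating it would never emit 'baz' or its `required` flag.
print(sorted(nested_field.schema.fields))  # ['bar']
```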
|
0.0
|
599e5aed3100c394f0c9ac193d668576be2edf63
|
[
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_name_resolver_returns_none_v2",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_name_resolver_returns_none_v3"
] |
[
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_helper_without_schema[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_helper_without_schema[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference[AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_in_list[AnalysisWithListSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_in_list[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_return_none[AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_return_none[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[2.0-AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[3.0.0-AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_instances_with_different_modifiers_added[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_instances_with_different_modifiers_added[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_with_clashing_names[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_with_clashing_names[3.0.0]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestCustomField::test_can_use_custom_field_decorator[2.0]",
"tests/test_ext_marshmallow.py::TestCustomField::test_can_use_custom_field_decorator[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-pet_schema1]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-tests.schemas.PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-pet_schema1]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-tests.schemas.PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_expand_parameters_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_expand_parameters_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_in_parameters_and_request_body_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_in_parameters_and_request_body_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_array_uses_ref_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_array_uses_ref_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_partially_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_partially_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2json[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2json[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2parameters[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2parameters[3.0.0]",
"tests/test_ext_marshmallow.py::TestCircularReference::test_circular_referencing_schemas[2.0]",
"tests/test_ext_marshmallow.py::TestCircularReference::test_circular_referencing_schemas[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_single[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_single[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_many[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_many[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_with_ref[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_with_ref[3.0.0]",
"tests/test_ext_marshmallow.py::TestOrderedSchema::test_ordered_schema[2.0]",
"tests/test_ext_marshmallow.py::TestOrderedSchema::test_ordered_schema[3.0.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props[2.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props[3.0.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props_passed_as_snake_case[2.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props_passed_as_snake_case[3.0.0]",
"tests/test_ext_marshmallow.py::TestSchemaWithDefaultValues::test_schema_with_default_values[2.0]",
"tests/test_ext_marshmallow.py::TestSchemaWithDefaultValues::test_schema_with_default_values[3.0.0]",
"tests/test_ext_marshmallow.py::TestList::test_list_with_nested[2.0]",
"tests/test_ext_marshmallow.py::TestList::test_list_with_nested[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2choices_preserving_order[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2choices_preserving_order[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Integer-integer]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Number-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Float-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-String-string0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-String-string1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Boolean-boolean0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Boolean-boolean1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-UUID-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-DateTime-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Date-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Time-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Email-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Url-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Field-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Raw-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Integer-integer]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Number-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Float-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-String-string0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-String-string1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Boolean-boolean0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Boolean-boolean1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-UUID-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-DateTime-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Date-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Time-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Email-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Url-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Field-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Raw-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_formatted_field_translates_to_array[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_formatted_field_translates_to_array[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Integer-int32]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Float-float]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-UUID-uuid]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-DateTime-date-time]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Date-date]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Email-email]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Url-url]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Integer-int32]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Float-float]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-UUID-uuid]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-DateTime-date-time]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Date-date]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Email-email]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Url-url]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_description[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_description[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_callable[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_callable[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default_and_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default_and_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_location[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_location[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_multiple_json_locations[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields2parameters_does_not_modify_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields2parameters_does_not_modify_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_location_mapping[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_location_mapping[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping_if_schema_many[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_dump_only[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_dump_only[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_equal[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_equal[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_only_allows_valid_properties_in_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_only_allows_valid_properties_in_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices_multiple[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices_multiple[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_additional_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_additional_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_allow_none[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_allow_none[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_invalid_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_invalid_schema[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_with_explicit_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_with_explicit_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_override_name_ma2[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_override_name_ma2[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_required_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_required_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_partial[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_partial[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_no_required_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_no_required_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_title_and_description_may_be_added[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_title_and_description_may_be_added[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_excluded_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_excluded_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_only_explicitly_declared_fields_are_translated[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_only_explicitly_declared_fields_are_translated[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_observed_field_name_for_required_field[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_observed_field_name_for_required_field[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection_with_many[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection_with_many[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_raises_error_if_no_declared_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_raises_error_if_no_declared_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_required[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_required[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_invalid_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_invalid_schema[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body_with_dump_only[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body_many[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance_many_should_raise_exception[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance_many_should_raise_exception[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_default_in_body[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_query[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_query[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_raises_error_if_not_a_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_raises_error_if_not_a_schema[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec_metadatas[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec_metadatas[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many_spec[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many_spec[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_ref[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_ref[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_without_name_raises_error[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_without_name_raises_error[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_many[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_many[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_ref_with_meta[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_ref_with_meta[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[2.0-only]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[2.0-exclude]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[3.0.0-only]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[3.0.0-exclude]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_with_adhoc_changes[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_with_adhoc_changes[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_excluded_fields[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_excluded_fields[3.0.0]",
"tests/test_openapi.py::TestNesting::test_nested_field_with_property[2.0]",
"tests/test_openapi.py::TestNesting::test_nested_field_with_property[3.0.0]",
"tests/test_openapi.py::test_openapi_tools_validate_v2",
"tests/test_openapi.py::test_openapi_tools_validate_v3",
"tests/test_openapi.py::TestFieldValidation::test_range[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_range[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_ranges[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_ranges[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_list_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_list_length[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_string_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_string_length[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_lengths[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_lengths[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_equal_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_equal_length[3.0.0]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-01-25 09:18:49+00:00
|
mit
| 3,730 |
|
marshmallow-code__apispec-365
|
diff --git a/apispec/core.py b/apispec/core.py
index 7fdf0d4..147c612 100644
--- a/apispec/core.py
+++ b/apispec/core.py
@@ -3,7 +3,7 @@
from collections import OrderedDict
from apispec.compat import iterkeys, iteritems
-from .exceptions import APISpecError, PluginMethodNotImplementedError
+from .exceptions import APISpecError, PluginMethodNotImplementedError, DuplicateComponentNameError
from .utils import OpenAPIVersion, deepupdate
VALID_METHODS = [
@@ -122,6 +122,10 @@ class Components(object):
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#definitionsObject
"""
+ if name in self._schemas:
+ raise DuplicateComponentNameError(
+ 'Another schema with name "{}" is already registered.'.format(name),
+ )
ret = {}
# Execute all helpers from plugins
for plugin in self._plugins:
@@ -147,6 +151,10 @@ class Components(object):
:param str location: location of the parameter.
:param dict kwargs: parameter fields.
"""
+ if param_id in self._parameters:
+ raise DuplicateComponentNameError(
+ 'Another parameter with name "{}" is already registered.'.format(param_id),
+ )
ret = kwargs.copy()
ret.setdefault('name', param_id)
ret['in'] = location
@@ -165,6 +173,10 @@ class Components(object):
:param str ref_id: ref_id to use as reference
:param dict kwargs: response fields
"""
+ if ref_id in self._responses:
+ raise DuplicateComponentNameError(
+ 'Another response with name "{}" is already registered.'.format(ref_id),
+ )
ret = kwargs.copy()
# Execute all helpers from plugins
for plugin in self._plugins:
@@ -257,7 +269,7 @@ class APISpec(object):
if ret is not None:
path = ret
if not path:
- raise APISpecError('Path template is not specified')
+ raise APISpecError('Path template is not specified.')
# Execute operation helpers
for plugin in self.plugins:
diff --git a/apispec/exceptions.py b/apispec/exceptions.py
index b382a27..0191810 100644
--- a/apispec/exceptions.py
+++ b/apispec/exceptions.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
"""Exception classes."""
-import warnings
class APISpecError(Exception):
"""Base class for all apispec-related errors."""
@@ -8,17 +7,8 @@ class APISpecError(Exception):
class PluginMethodNotImplementedError(APISpecError, NotImplementedError):
"""Raised when calling an unimplemented helper method in a plugin"""
+class DuplicateComponentNameError(APISpecError):
+ """Raised when registering two components with the same name"""
+
class OpenAPIError(APISpecError):
"""Raised when a OpenAPI spec validation fails."""
-
-class SwaggerError(OpenAPIError):
- """
- .. deprecated:: 0.38.0
- Use `apispec.exceptions.OpenAPIError` instead.
- """
- def __init__(self, *args, **kwargs):
- warnings.warn(
- 'SwaggerError is deprecated. Use OpenAPIError instead.',
- DeprecationWarning,
- )
- super(SwaggerError, self).__init__(*args, **kwargs)
diff --git a/apispec/ext/marshmallow/common.py b/apispec/ext/marshmallow/common.py
index 2cbbf01..f0ef880 100644
--- a/apispec/ext/marshmallow/common.py
+++ b/apispec/ext/marshmallow/common.py
@@ -43,12 +43,12 @@ def get_fields(schema):
return schema.fields
elif hasattr(schema, '_declared_fields'):
return copy.deepcopy(schema._declared_fields)
- raise ValueError("{0!r} doesn't have either `fields` or `_declared_fields`".format(schema))
+ raise ValueError("{0!r} doesn't have either `fields` or `_declared_fields`.".format(schema))
def make_schema_key(schema):
if not isinstance(schema, marshmallow.Schema):
- raise TypeError('can only make a schema key based on a Schema instance')
+ raise TypeError('can only make a schema key based on a Schema instance.')
modifiers = []
for modifier in MODIFIERS:
attribute = getattr(schema, modifier)
diff --git a/apispec/ext/marshmallow/openapi.py b/apispec/ext/marshmallow/openapi.py
index a254079..fa620d5 100644
--- a/apispec/ext/marshmallow/openapi.py
+++ b/apispec/ext/marshmallow/openapi.py
@@ -140,7 +140,7 @@ class OpenAPIConverter(object):
elif len(args) == 2:
openapi_type_field = args
else:
- raise TypeError('Pass core marshmallow field type or (type, fmt) pair')
+ raise TypeError('Pass core marshmallow field type or (type, fmt) pair.')
def inner(field_type):
self.field_mapping[field_type] = openapi_type_field
diff --git a/setup.py b/setup.py
index 6db211b..4483ff4 100644
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@ def find_version(fname):
version = m.group(1)
break
if not version:
- raise RuntimeError('Cannot find version information')
+ raise RuntimeError('Cannot find version information.')
return version
|
marshmallow-code/apispec
|
f12ca729cc117c3f70b75fbbfe2ccda352772576
|
diff --git a/tests/test_core.py b/tests/test_core.py
index 6405a35..b1b9aec 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -5,7 +5,7 @@ import pytest
import yaml
from apispec import APISpec, BasePlugin
-from apispec.exceptions import APISpecError
+from apispec.exceptions import APISpecError, DuplicateComponentNameError
from .utils import get_definitions, get_paths, get_parameters, get_responses
@@ -171,6 +171,14 @@ class TestDefinitions:
defs = spec.to_dict()['components']['schemas']
assert defs['Pet']['discriminator'] == 'name'
+ def test_definition_duplicate_name(self, spec):
+ spec.components.schema('Pet', properties=self.properties)
+ with pytest.raises(
+ DuplicateComponentNameError,
+ match='Another schema with name "Pet" is already registered.',
+ ):
+ spec.components.schema('Pet', properties=self.properties)
+
def test_to_yaml(self, spec):
enum = ['name', 'photoUrls']
spec.components.schema(
@@ -342,6 +350,15 @@ class TestPath:
assert 'param1' in params
assert 'param2' in params
+ def test_parameter_duplicate_name(self, spec):
+ route_spec = self.paths['/pet/{petId}']['get']
+ spec.components.parameter('test_parameter', 'path', **route_spec['parameters'][0])
+ with pytest.raises(
+ DuplicateComponentNameError,
+ match='Another parameter with name "test_parameter" is already registered.',
+ ):
+ spec.components.parameter('test_parameter', 'path', **route_spec['parameters'][0])
+
def test_response(self, spec):
route_spec = self.paths['/pet/{petId}']['get']
@@ -371,6 +388,15 @@ class TestPath:
assert 'resp1' in responses
assert 'resp2' in responses
+ def test_response_duplicate_name(self, spec):
+ route_spec = self.paths['/pet/{petId}']['get']
+ spec.components.response('test_response', **route_spec['responses']['200'])
+ with pytest.raises(
+ DuplicateComponentNameError,
+ match='Another response with name "test_response" is already registered.',
+ ):
+ spec.components.response('test_response', **route_spec['responses']['200'])
+
def test_path_check_invalid_http_method(self, spec):
spec.path('/pet/{petId}', operations={'get': {}})
spec.path('/pet/{petId}', operations={'x-dummy': {}})
diff --git a/tests/test_ext_marshmallow.py b/tests/test_ext_marshmallow.py
index 4437a29..d0d07df 100644
--- a/tests/test_ext_marshmallow.py
+++ b/tests/test_ext_marshmallow.py
@@ -128,7 +128,7 @@ class TestDefinitionHelper:
def test_warning_when_schema_added_twice(self, spec, schema):
spec.components.schema('Analysis', schema=schema)
with pytest.warns(UserWarning, match='has already been added to the spec'):
- spec.components.schema('Analysis', schema=schema)
+ spec.components.schema('DuplicateAnalysis', schema=schema)
def test_schema_instances_with_different_modifiers_added(self, spec):
class MultiModifierSchema(Schema):
diff --git a/tests/test_openapi.py b/tests/test_openapi.py
index b4e4e36..abe5368 100644
--- a/tests/test_openapi.py
+++ b/tests/test_openapi.py
@@ -412,12 +412,10 @@ class TestMarshmallowSchemaToModelDefinition:
class NotASchema(object):
pass
- with pytest.raises(ValueError) as excinfo:
+ expected_error = "{0!r} doesn't have either `fields` or `_declared_fields`.".format(NotASchema)
+ with pytest.raises(ValueError, match=expected_error):
openapi.schema2jsonschema(NotASchema)
- assert excinfo.value.args[0] == ("{0!r} doesn't have either `fields` "
- 'or `_declared_fields`'.format(NotASchema))
-
class TestMarshmallowSchemaToParameters:
@@ -546,12 +544,10 @@ class TestMarshmallowSchemaToParameters:
class NotASchema(object):
pass
- with pytest.raises(ValueError) as excinfo:
+ expected_error = "{0!r} doesn't have either `fields` or `_declared_fields`".format(NotASchema)
+ with pytest.raises(ValueError, match=expected_error):
openapi.schema2jsonschema(NotASchema)
- assert excinfo.value.args[0] == ("{0!r} doesn't have either `fields` "
- 'or `_declared_fields`'.format(NotASchema))
-
class CategorySchema(Schema):
id = fields.Int()
|
Raise exception upon name collision when registering components
When registering two components with the same name (e.g. two schemas), the latter silently overwrites the former.
I think we should raise an exception to inform the user.
Theoretically, this could break code where the user relied on the overwriting behaviour (like registering a first version, then conditionally registering a second version), but that shouldn't be a blocker.
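A minimal standalone sketch of the proposed behaviour (illustrative class names, not the apispec code): registering a second component under an already-used name raises instead of overwriting.
```py
class DuplicateComponentNameError(Exception):
    """Raised when registering two components with the same name."""

class ComponentRegistry:
    """Toy stand-in for apispec's Components container."""

    def __init__(self):
        self._schemas = {}

    def schema(self, name, properties):
        # Guard against silent overwrites: fail loudly on a name collision.
        if name in self._schemas:
            raise DuplicateComponentNameError(
                'Another schema with name "{}" is already registered.'.format(name)
            )
        self._schemas[name] = {"properties": properties}

registry = ComponentRegistry()
registry.schema("Pet", {"name": {"type": "string"}})
try:
    registry.schema("Pet", {"name": {"type": "string"}})
except DuplicateComponentNameError as exc:
    print(exc)  # Another schema with name "Pet" is already registered.
```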
|
0.0
|
f12ca729cc117c3f70b75fbbfe2ccda352772576
|
[
"tests/test_core.py::TestAPISpecInit::test_raises_wrong_apispec_version",
"tests/test_core.py::TestMetadata::test_openapi_metadata[2.0]",
"tests/test_core.py::TestMetadata::test_openapi_metadata[3.0.0]",
"tests/test_core.py::TestMetadata::test_openapi_metadata_merge_v3[3.0.0]",
"tests/test_core.py::TestTags::test_tag[2.0]",
"tests/test_core.py::TestTags::test_tag[3.0.0]",
"tests/test_core.py::TestTags::test_tag_is_chainable[2.0]",
"tests/test_core.py::TestTags::test_tag_is_chainable[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition[2.0]",
"tests/test_core.py::TestDefinitions::test_definition[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_is_chainable[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_is_chainable[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_description[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_description[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_stores_enum[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_stores_enum[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_extra_fields[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_extra_fields[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_duplicate_name[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_duplicate_name[3.0.0]",
"tests/test_core.py::TestDefinitions::test_to_yaml[2.0]",
"tests/test_core.py::TestDefinitions::test_to_yaml[3.0.0]",
"tests/test_core.py::TestPath::test_path[2.0]",
"tests/test_core.py::TestPath::test_path[3.0.0]",
"tests/test_core.py::TestPath::test_paths_maintain_order[2.0]",
"tests/test_core.py::TestPath::test_paths_maintain_order[3.0.0]",
"tests/test_core.py::TestPath::test_paths_is_chainable[2.0]",
"tests/test_core.py::TestPath::test_paths_is_chainable[3.0.0]",
"tests/test_core.py::TestPath::test_methods_maintain_order[2.0]",
"tests/test_core.py::TestPath::test_methods_maintain_order[3.0.0]",
"tests/test_core.py::TestPath::test_path_merges_paths[2.0]",
"tests/test_core.py::TestPath::test_path_merges_paths[3.0.0]",
"tests/test_core.py::TestPath::test_path_ensures_path_parameters_required[2.0]",
"tests/test_core.py::TestPath::test_path_ensures_path_parameters_required[3.0.0]",
"tests/test_core.py::TestPath::test_path_with_no_path_raises_error[2.0]",
"tests/test_core.py::TestPath::test_path_with_no_path_raises_error[3.0.0]",
"tests/test_core.py::TestPath::test_parameter[2.0]",
"tests/test_core.py::TestPath::test_parameter[3.0.0]",
"tests/test_core.py::TestPath::test_parameter_is_chainable[2.0]",
"tests/test_core.py::TestPath::test_parameter_is_chainable[3.0.0]",
"tests/test_core.py::TestPath::test_parameter_duplicate_name[2.0]",
"tests/test_core.py::TestPath::test_parameter_duplicate_name[3.0.0]",
"tests/test_core.py::TestPath::test_response[2.0]",
"tests/test_core.py::TestPath::test_response[3.0.0]",
"tests/test_core.py::TestPath::test_response_is_chainable[2.0]",
"tests/test_core.py::TestPath::test_response_is_chainable[3.0.0]",
"tests/test_core.py::TestPath::test_response_duplicate_name[2.0]",
"tests/test_core.py::TestPath::test_response_duplicate_name[3.0.0]",
"tests/test_core.py::TestPath::test_path_check_invalid_http_method[2.0]",
"tests/test_core.py::TestPath::test_path_check_invalid_http_method[3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_factory",
"tests/test_core.py::TestPlugins::test_plugin_schema_helper_is_used[True-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_schema_helper_is_used[True-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_schema_helper_is_used[False-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_schema_helper_is_used[False-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_parameter_helper_is_used[True-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_parameter_helper_is_used[True-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_parameter_helper_is_used[False-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_parameter_helper_is_used[False-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_response_helper_is_used[True-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_response_helper_is_used[True-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_response_helper_is_used[False-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_response_helper_is_used[False-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_path_helper_is_used[True-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_path_helper_is_used[True-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_path_helper_is_used[False-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_path_helper_is_used[False-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_operation_helper_is_used[2.0]",
"tests/test_core.py::TestPlugins::test_plugin_operation_helper_is_used[3.0.0]",
"tests/test_core.py::TestPluginsOrder::test_plugins_order",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_helper_without_schema[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_helper_without_schema[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference[AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_in_list[AnalysisWithListSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_in_list[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_return_none[AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_return_none[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[2.0-AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[3.0.0-AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_instances_with_different_modifiers_added[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_instances_with_different_modifiers_added[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_with_clashing_names[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_with_clashing_names[3.0.0]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestCustomField::test_can_use_custom_field_decorator[2.0]",
"tests/test_ext_marshmallow.py::TestCustomField::test_can_use_custom_field_decorator[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-pet_schema1]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-tests.schemas.PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-pet_schema1]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-tests.schemas.PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_expand_parameters_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_expand_parameters_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_name_resolver_returns_none_v2",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_name_resolver_returns_none_v3",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_in_parameters_and_request_body_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_in_parameters_and_request_body_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_array_uses_ref_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_array_uses_ref_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_partially_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_partially_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2json[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2json[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2parameters[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2parameters[3.0.0]",
"tests/test_ext_marshmallow.py::TestCircularReference::test_circular_referencing_schemas[2.0]",
"tests/test_ext_marshmallow.py::TestCircularReference::test_circular_referencing_schemas[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_single[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_single[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_many[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_many[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_with_ref[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_with_ref[3.0.0]",
"tests/test_ext_marshmallow.py::TestOrderedSchema::test_ordered_schema[2.0]",
"tests/test_ext_marshmallow.py::TestOrderedSchema::test_ordered_schema[3.0.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props[2.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props[3.0.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props_passed_as_snake_case[2.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props_passed_as_snake_case[3.0.0]",
"tests/test_ext_marshmallow.py::TestSchemaWithDefaultValues::test_schema_with_default_values[2.0]",
"tests/test_ext_marshmallow.py::TestSchemaWithDefaultValues::test_schema_with_default_values[3.0.0]",
"tests/test_ext_marshmallow.py::TestList::test_list_with_nested[2.0]",
"tests/test_ext_marshmallow.py::TestList::test_list_with_nested[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2choices_preserving_order[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2choices_preserving_order[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Integer-integer]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Number-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Float-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-String-string0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-String-string1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Boolean-boolean0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Boolean-boolean1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-UUID-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-DateTime-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Date-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Time-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Email-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Url-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Field-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Raw-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Integer-integer]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Number-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Float-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-String-string0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-String-string1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Boolean-boolean0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Boolean-boolean1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-UUID-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-DateTime-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Date-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Time-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Email-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Url-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Field-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Raw-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_formatted_field_translates_to_array[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_formatted_field_translates_to_array[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Integer-int32]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Float-float]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-UUID-uuid]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-DateTime-date-time]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Date-date]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Email-email]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Url-url]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Integer-int32]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Float-float]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-UUID-uuid]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-DateTime-date-time]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Date-date]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Email-email]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Url-url]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_description[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_description[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_callable[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_callable[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default_and_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default_and_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_location[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_location[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_multiple_json_locations[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields2parameters_does_not_modify_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields2parameters_does_not_modify_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_location_mapping[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_location_mapping[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping_if_schema_many[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_dump_only[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_dump_only[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_equal[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_equal[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_only_allows_valid_properties_in_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_only_allows_valid_properties_in_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices_multiple[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices_multiple[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_additional_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_additional_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_allow_none[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_allow_none[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_invalid_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_invalid_schema[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_with_explicit_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_with_explicit_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_override_name_ma2[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_override_name_ma2[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_required_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_required_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_partial[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_partial[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_no_required_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_no_required_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_title_and_description_may_be_added[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_title_and_description_may_be_added[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_excluded_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_excluded_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_only_explicitly_declared_fields_are_translated[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_only_explicitly_declared_fields_are_translated[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_observed_field_name_for_required_field[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_observed_field_name_for_required_field[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection_with_many[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection_with_many[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_raises_error_if_no_declared_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_raises_error_if_no_declared_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_required[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_required[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_invalid_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_invalid_schema[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body_with_dump_only[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body_many[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance_many_should_raise_exception[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance_many_should_raise_exception[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_default_in_body[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_query[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_query[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_raises_error_if_not_a_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_raises_error_if_not_a_schema[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec_metadatas[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec_metadatas[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many_spec[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many_spec[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_ref[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_ref[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_without_name_raises_error[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_without_name_raises_error[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_many[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_many[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_ref_with_meta[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_self_ref_with_meta[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[2.0-only]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[2.0-exclude]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[3.0.0-only]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[3.0.0-exclude]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_with_adhoc_changes[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_with_adhoc_changes[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_excluded_fields[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_excluded_fields[3.0.0]",
"tests/test_openapi.py::TestNesting::test_nested_field_with_property[2.0]",
"tests/test_openapi.py::TestNesting::test_nested_field_with_property[3.0.0]",
"tests/test_openapi.py::test_openapi_tools_validate_v2",
"tests/test_openapi.py::test_openapi_tools_validate_v3",
"tests/test_openapi.py::TestFieldValidation::test_range[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_range[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_ranges[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_ranges[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_list_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_list_length[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_string_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_string_length[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_lengths[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_lengths[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_equal_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_equal_length[3.0.0]"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-01-29 11:11:13+00:00
|
mit
| 3,731 |
|
marshmallow-code__apispec-382
|
diff --git a/apispec/core.py b/apispec/core.py
index b3142e7..ddada9e 100644
--- a/apispec/core.py
+++ b/apispec/core.py
@@ -277,13 +277,17 @@ class APISpec(object):
self._tags.append(tag)
return self
- def path(self, path=None, operations=None, **kwargs):
+ def path(
+ self, path=None, operations=None, summary=None, description=None, **kwargs
+ ):
"""Add a new path object to the spec.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#path-item-object
:param str|None path: URL path component
:param dict|None operations: describes the http methods and options for `path`
+ :param str summary: short summary relevant to all operations in this path
+ :param str description: long description relevant to all operations in this path
:param dict kwargs: parameters used by any path helpers see :meth:`register_path_helper`
"""
operations = operations or OrderedDict()
@@ -309,4 +313,8 @@ class APISpec(object):
clean_operations(operations, self.openapi_version.major)
self._paths.setdefault(path, operations).update(operations)
+ if summary is not None:
+ self._paths[path]["summary"] = summary
+ if description is not None:
+ self._paths[path]["description"] = description
return self
|
marshmallow-code/apispec
|
f630cfcb69679f5b64ff1b2e9e2ddacc7cf51c55
|
diff --git a/tests/test_core.py b/tests/test_core.py
index 594c035..cd13657 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -298,6 +298,15 @@ class TestPath:
spec.path()
assert "Path template is not specified" in str(excinfo)
+ def test_path_summary_description(self, spec):
+ summary = "Operations on a Pet"
+ description = "Operations on a Pet identified by its ID"
+ spec.path(path="/pet/{petId}", summary=summary, description=description)
+
+ p = get_paths(spec)["/pet/{petId}"]
+ assert p["summary"] == summary
+ assert p["description"] == description
+
def test_parameter(self, spec):
route_spec = self.paths["/pet/{petId}"]["get"]
|
Add support for `summary` and `description` fields for paths
Documented [here](https://swagger.io/docs/specification/paths-and-operations/):
> Paths may have an optional short summary and a longer description for documentation purposes. This information is supposed to be relevant to all operations in this path. description can be multi-line and supports Markdown (CommonMark) for rich text representation.
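A hedged usage sketch, assuming the `summary` and `description` keyword arguments from the patch above land on `APISpec.path()`; both end up on the path item itself rather than on any single operation.
```py
from apispec import APISpec

spec = APISpec(
    title="Swagger Petstore",
    version="1.0.0",
    openapi_version="3.0.2",
)
spec.path(
    path="/pet/{petId}",
    operations={"get": {"responses": {"200": {"description": "A pet"}}}},
    summary="Operations on a Pet",
    description="Operations on a Pet identified by its ID",
)

path_item = spec.to_dict()["paths"]["/pet/{petId}"]
print(path_item["summary"])      # Operations on a Pet
print(path_item["description"])  # Operations on a Pet identified by its ID
```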
|
0.0
|
f630cfcb69679f5b64ff1b2e9e2ddacc7cf51c55
|
[
"tests/test_core.py::TestPath::test_path_summary_description[2.0]",
"tests/test_core.py::TestPath::test_path_summary_description[3.0.0]"
] |
[
"tests/test_core.py::TestAPISpecInit::test_raises_wrong_apispec_version",
"tests/test_core.py::TestMetadata::test_openapi_metadata[2.0]",
"tests/test_core.py::TestMetadata::test_openapi_metadata[3.0.0]",
"tests/test_core.py::TestMetadata::test_openapi_metadata_merge_v3[3.0.0]",
"tests/test_core.py::TestTags::test_tag[2.0]",
"tests/test_core.py::TestTags::test_tag[3.0.0]",
"tests/test_core.py::TestTags::test_tag_is_chainable[2.0]",
"tests/test_core.py::TestTags::test_tag_is_chainable[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition[2.0]",
"tests/test_core.py::TestDefinitions::test_definition[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_is_chainable[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_is_chainable[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_description[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_description[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_stores_enum[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_stores_enum[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_extra_fields[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_extra_fields[3.0.0]",
"tests/test_core.py::TestDefinitions::test_definition_duplicate_name[2.0]",
"tests/test_core.py::TestDefinitions::test_definition_duplicate_name[3.0.0]",
"tests/test_core.py::TestDefinitions::test_to_yaml[2.0]",
"tests/test_core.py::TestDefinitions::test_to_yaml[3.0.0]",
"tests/test_core.py::TestPath::test_path[2.0]",
"tests/test_core.py::TestPath::test_path[3.0.0]",
"tests/test_core.py::TestPath::test_paths_maintain_order[2.0]",
"tests/test_core.py::TestPath::test_paths_maintain_order[3.0.0]",
"tests/test_core.py::TestPath::test_paths_is_chainable[2.0]",
"tests/test_core.py::TestPath::test_paths_is_chainable[3.0.0]",
"tests/test_core.py::TestPath::test_methods_maintain_order[2.0]",
"tests/test_core.py::TestPath::test_methods_maintain_order[3.0.0]",
"tests/test_core.py::TestPath::test_path_merges_paths[2.0]",
"tests/test_core.py::TestPath::test_path_merges_paths[3.0.0]",
"tests/test_core.py::TestPath::test_path_ensures_path_parameters_required[2.0]",
"tests/test_core.py::TestPath::test_path_ensures_path_parameters_required[3.0.0]",
"tests/test_core.py::TestPath::test_path_with_no_path_raises_error[2.0]",
"tests/test_core.py::TestPath::test_path_with_no_path_raises_error[3.0.0]",
"tests/test_core.py::TestPath::test_parameter[2.0]",
"tests/test_core.py::TestPath::test_parameter[3.0.0]",
"tests/test_core.py::TestPath::test_parameter_is_chainable[2.0]",
"tests/test_core.py::TestPath::test_parameter_is_chainable[3.0.0]",
"tests/test_core.py::TestPath::test_parameter_duplicate_name[2.0]",
"tests/test_core.py::TestPath::test_parameter_duplicate_name[3.0.0]",
"tests/test_core.py::TestPath::test_response[2.0]",
"tests/test_core.py::TestPath::test_response[3.0.0]",
"tests/test_core.py::TestPath::test_response_is_chainable[2.0]",
"tests/test_core.py::TestPath::test_response_is_chainable[3.0.0]",
"tests/test_core.py::TestPath::test_response_duplicate_name[2.0]",
"tests/test_core.py::TestPath::test_response_duplicate_name[3.0.0]",
"tests/test_core.py::TestPath::test_security_scheme[2.0]",
"tests/test_core.py::TestPath::test_security_scheme[3.0.0]",
"tests/test_core.py::TestPath::test_security_scheme_is_chainable[2.0]",
"tests/test_core.py::TestPath::test_security_scheme_is_chainable[3.0.0]",
"tests/test_core.py::TestPath::test_security_scheme_duplicate_name[2.0]",
"tests/test_core.py::TestPath::test_security_scheme_duplicate_name[3.0.0]",
"tests/test_core.py::TestPath::test_path_check_invalid_http_method[2.0]",
"tests/test_core.py::TestPath::test_path_check_invalid_http_method[3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_factory",
"tests/test_core.py::TestPlugins::test_plugin_schema_helper_is_used[True-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_schema_helper_is_used[True-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_schema_helper_is_used[False-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_schema_helper_is_used[False-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_parameter_helper_is_used[True-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_parameter_helper_is_used[True-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_parameter_helper_is_used[False-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_parameter_helper_is_used[False-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_response_helper_is_used[True-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_response_helper_is_used[True-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_response_helper_is_used[False-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_response_helper_is_used[False-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_path_helper_is_used[True-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_path_helper_is_used[True-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_path_helper_is_used[False-2.0]",
"tests/test_core.py::TestPlugins::test_plugin_path_helper_is_used[False-3.0.0]",
"tests/test_core.py::TestPlugins::test_plugin_operation_helper_is_used[2.0]",
"tests/test_core.py::TestPlugins::test_plugin_operation_helper_is_used[3.0.0]",
"tests/test_core.py::TestPluginsOrder::test_plugins_order"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-04 21:07:49+00:00
|
mit
| 3,732 |
|
marshmallow-code__apispec-386
|
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index c8e3f96..4a5fbde 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -15,6 +15,12 @@ Features:
from ``Regexp`` validators (:pr:`364`).
Thanks :user:`DStape` for the PR.
+Bug fixes:
+
+- [apispec.ext.marshmallow]: Fix automatic documentation of schemas when
+  using ``Nested(MySchema, many=True)`` (:issue:`383`). Thanks
+ :user:`whoiswes` for reporting.
+
Other changes:
- *Backwards-incompatible*: Components properties are now passed as dictionaries rather than keyword arguments (:pr:`381`).
diff --git a/apispec/ext/marshmallow/openapi.py b/apispec/ext/marshmallow/openapi.py
index 4bffa36..259d2dd 100644
--- a/apispec/ext/marshmallow/openapi.py
+++ b/apispec/ext/marshmallow/openapi.py
@@ -450,7 +450,7 @@ class OpenAPIConverter(object):
name = self.schema_name_resolver(schema_cls)
if not name:
try:
- return self.schema2jsonschema(schema)
+ json_schema = self.schema2jsonschema(schema)
except RuntimeError:
raise APISpecError(
"Name resolver returned None for schema {schema} which is "
@@ -459,6 +459,9 @@ class OpenAPIConverter(object):
" MarshmallowPlugin returns a string for all circular"
" referencing schemas.".format(schema=schema)
)
+ if getattr(schema, "many", False):
+ return {"type": "array", "items": json_schema}
+ return json_schema
name = get_unique_schema_name(self.spec.components, name)
self.spec.components.schema(name, schema=schema)
return self.get_ref_dict(schema_instance)
@@ -649,9 +652,6 @@ class OpenAPIConverter(object):
if hasattr(Meta, "description"):
jsonschema["description"] = Meta.description
- if getattr(schema, "many", False):
- jsonschema = {"type": "array", "items": jsonschema}
-
return jsonschema
def fields2jsonschema(self, fields, ordered=False, partial=None):
|
marshmallow-code/apispec
|
bf3faf4125d98396912d9c7d3e5bbe55aa179106
|
diff --git a/tests/test_ext_marshmallow.py b/tests/test_ext_marshmallow.py
index f474b5f..ce1083d 100644
--- a/tests/test_ext_marshmallow.py
+++ b/tests/test_ext_marshmallow.py
@@ -102,7 +102,6 @@ class TestDefinitionHelper:
@pytest.mark.parametrize("schema", [AnalysisSchema, AnalysisSchema()])
def test_resolve_schema_dict_auto_reference_return_none(self, schema):
- # this resolver return None
def resolver(schema):
return None
@@ -162,6 +161,27 @@ class TestDefinitionHelper:
assert "Pet" in definitions
assert "Pet1" in definitions
+ def test_resolve_nested_schema_many_true_resolver_return_none(self):
+ def resolver(schema):
+ return None
+
+ class PetFamilySchema(Schema):
+ pets_1 = Nested(PetSchema, many=True)
+ pets_2 = List(Nested(PetSchema))
+
+ spec = APISpec(
+ title="Test auto-reference",
+ version="0.1",
+ openapi_version="2.0",
+ plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
+ )
+
+ spec.components.schema("PetFamily", schema=PetFamilySchema)
+ props = get_schemas(spec)["PetFamily"]["properties"]
+ pets_1 = props["pets_1"]
+ pets_2 = props["pets_2"]
+ assert pets_1["type"] == pets_2["type"] == "array"
+
class TestComponentParameterHelper:
@pytest.mark.parametrize("schema", [PetSchema, PetSchema()])
@@ -250,7 +270,8 @@ class TestCustomField:
class TestOperationHelper:
@pytest.mark.parametrize(
- "pet_schema", (PetSchema, PetSchema(), "tests.schemas.PetSchema")
+ "pet_schema",
+ (PetSchema, PetSchema(), PetSchema(many=True), "tests.schemas.PetSchema"),
)
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_v2(self, spec_fixture, pet_schema):
@@ -268,7 +289,11 @@ class TestOperationHelper:
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
- reference = get["responses"][200]["schema"]
+ if isinstance(pet_schema, Schema) and pet_schema.many is True:
+ assert get["responses"][200]["schema"]["type"] == "array"
+ reference = get["responses"][200]["schema"]["items"]
+ else:
+ reference = get["responses"][200]["schema"]
assert reference == {"$ref": ref_path(spec_fixture.spec) + "Pet"}
assert len(spec_fixture.spec.components._schemas) == 1
resolved_schema = spec_fixture.spec.components._schemas["Pet"]
@@ -276,7 +301,8 @@ class TestOperationHelper:
assert get["responses"][200]["description"] == "successful operation"
@pytest.mark.parametrize(
- "pet_schema", (PetSchema, PetSchema(), "tests.schemas.PetSchema")
+ "pet_schema",
+ (PetSchema, PetSchema(), PetSchema(many=True), "tests.schemas.PetSchema"),
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_v3(self, spec_fixture, pet_schema):
@@ -294,7 +320,16 @@ class TestOperationHelper:
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
- reference = get["responses"][200]["content"]["application/json"]["schema"]
+ if isinstance(pet_schema, Schema) and pet_schema.many is True:
+ assert (
+ get["responses"][200]["content"]["application/json"]["schema"]["type"]
+ == "array"
+ )
+ reference = get["responses"][200]["content"]["application/json"]["schema"][
+ "items"
+ ]
+ else:
+ reference = get["responses"][200]["content"]["application/json"]["schema"]
assert reference == {"$ref": ref_path(spec_fixture.spec) + "Pet"}
assert len(spec_fixture.spec.components._schemas) == 1
diff --git a/tests/test_openapi.py b/tests/test_openapi.py
index 41b2298..3a64ef3 100644
--- a/tests/test_openapi.py
+++ b/tests/test_openapi.py
@@ -413,25 +413,16 @@ class TestMarshmallowSchemaToModelDefinition:
res = openapi.fields2jsonschema(fields_dict)
assert res["required"] == ["id"]
- def test_schema_instance_inspection(self, openapi):
+ @pytest.mark.parametrize("many", (True, False))
+ def test_schema_instance_inspection(self, openapi, many):
class UserSchema(Schema):
_id = fields.Int()
- res = openapi.schema2jsonschema(UserSchema())
+ res = openapi.schema2jsonschema(UserSchema(many=many))
assert res["type"] == "object"
props = res["properties"]
assert "_id" in props
- def test_schema_instance_inspection_with_many(self, openapi):
- class UserSchema(Schema):
- _id = fields.Int()
-
- res = openapi.schema2jsonschema(UserSchema(many=True))
- assert res["type"] == "array"
- assert "items" in res
- props = res["items"]["properties"]
- assert "_id" in props
-
def test_raises_error_if_no_declared_fields(self, openapi):
class NotASchema(object):
pass
@@ -654,7 +645,7 @@ class TestNesting:
def test_schema2jsonschema_with_nested_fields_with_adhoc_changes(
self, spec_fixture
):
- category_schema = CategorySchema(many=True)
+ category_schema = CategorySchema()
category_schema.fields["id"].required = True
class PetSchema(Schema):
@@ -667,10 +658,10 @@ class TestNesting:
assert props["Category"] == spec_fixture.openapi.schema2jsonschema(
category_schema
)
- assert set(props["Category"]["items"]["required"]) == {"id", "name"}
+ assert set(props["Category"]["required"]) == {"id", "name"}
- props["Category"]["items"]["required"] = ["name"]
- assert props["Category"]["items"] == spec_fixture.openapi.schema2jsonschema(
+ props["Category"]["required"] = ["name"]
+ assert props["Category"] == spec_fixture.openapi.schema2jsonschema(
CategorySchema
)
|
Docstring-only schemas incorrectly referenced as arrays?
Tested with Marshmallow 3.0.0rc3 and APISpec 1.0.0rc1.
Took a copy of the APISpec quickstart code and removed the definitions, so that schemas were only being referenced from the docstrings. I'm seeing that the nested schema is picked up as an array (incorrect), but it is also referenced as an array from the parent schema (correct).
Example (as short as I can make it, ignoring imports):
```py
spec = APISpec(
    title="Swagger Petstore",
    version="1.0.0",
    openapi_version="3.0.2",
    plugins=[FlaskPlugin(), MarshmallowPlugin()],
)

app = Flask(__name__)


class CategorySchema(Schema):
    id = fields.Int()
    name = fields.Str(required=True)


class PetSchema(Schema):
    category = fields.Nested(CategorySchema, many=True)
    name = fields.Str()


@app.route("/pet")
def random_pet():
    """
    ---
    get:
      responses:
        200:
          content:
            application/json:
              schema: PetSchema
    """
    return jsonify({'foo': 'bar'})


@app.route("/category")
def random_category():
    """
    ---
    get:
      responses:
        200:
          content:
            application/json:
              schema: CategorySchema
    """
    return jsonify({'foo': 'bar'})


with app.test_request_context():
    spec.path(view=random_pet)
    spec.path(view=random_category)

print(spec.to_dict())
```
Here's what gets output (note that the nested reference and the child schema are both array types):
```json
"schemas": {
    "Category": {
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "id": {"type": "integer", "format": "int32"}
            }
        }
    },
    "Pet": {
        "type": "object",
        "properties": {
            "category": {
                "type": "array",
                "items": {
                    "$ref": "#/components/schemas/Category"
                }
            }
        }
    }
}
```
I think the issue stems from the fields2jsonschema method [here](https://github.com/marshmallow-code/apispec/blob/dev/apispec/ext/marshmallow/openapi.py#L647-L648), where the schema's attributes are checked for many=True. I would expect the schema definitions themselves to always be registered as singletons, and only the references to those schemas to be checked for many. I might well be missing something, but in my lab, simply commenting out those two lines fixed everything.
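To illustrate what I mean, here is a minimal, hypothetical sketch — the function names are mine, not apispec's actual internals:

```py
def definition_for(schema_properties, many):
    # Suspected current behavior: many=True on the schema *instance*
    # turns the registered definition itself into an array.
    body = {"type": "object", "properties": schema_properties}
    if many:
        return {"type": "array", "items": body}
    return body


def reference_to(ref, many=False):
    # Expected behavior: definitions stay singletons, and only the
    # *reference* from a parent field (fields.Nested(..., many=True))
    # gets wrapped in an array.
    if many:
        return {"type": "array", "items": {"$ref": ref}}
    return {"$ref": ref}
```

With that split, `Category` would always be registered as an object, and only the `Pet.category` reference would be an array.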
Alternatively, the schemas could be registered explicitly, as they are in the quickstart example, but we try to do everything in docstrings where possible. So if this is a non-issue, feel free to close.
Many thanks for some awesome libraries!
|
0.0
|
bf3faf4125d98396912d9c7d3e5bbe55aa179106
|
[
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-pet_schema2]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-pet_schema2]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[2.0-True]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[3.0.0-True]"
] |
[
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_helper_without_schema[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_helper_without_schema[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference[AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_in_list[AnalysisWithListSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_in_list[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_return_none[AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_return_none[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[2.0-AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[3.0.0-AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_instances_with_different_modifiers_added[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_instances_with_different_modifiers_added[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_with_clashing_names[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_with_clashing_names[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_nested_schema_many_true_resolver_return_none",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestCustomField::test_can_use_custom_field_decorator[2.0]",
"tests/test_ext_marshmallow.py::TestCustomField::test_can_use_custom_field_decorator[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-pet_schema1]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-tests.schemas.PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-pet_schema1]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-tests.schemas.PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_expand_parameters_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_expand_parameters_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_name_resolver_returns_none_v2",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_name_resolver_returns_none_v3",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_in_parameters_and_request_body_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_in_parameters_and_request_body_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_array_uses_ref_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_array_uses_ref_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_partially_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_partially_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2json[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2json[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2parameters[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2parameters[3.0.0]",
"tests/test_ext_marshmallow.py::TestCircularReference::test_circular_referencing_schemas[2.0]",
"tests/test_ext_marshmallow.py::TestCircularReference::test_circular_referencing_schemas[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_single[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_single[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_many[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_many[3.0.0]",
"tests/test_ext_marshmallow.py::TestOrderedSchema::test_ordered_schema[2.0]",
"tests/test_ext_marshmallow.py::TestOrderedSchema::test_ordered_schema[3.0.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props[2.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props[3.0.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props_passed_as_snake_case[2.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props_passed_as_snake_case[3.0.0]",
"tests/test_ext_marshmallow.py::TestSchemaWithDefaultValues::test_schema_with_default_values[2.0]",
"tests/test_ext_marshmallow.py::TestSchemaWithDefaultValues::test_schema_with_default_values[3.0.0]",
"tests/test_ext_marshmallow.py::TestList::test_list_with_nested[2.0]",
"tests/test_ext_marshmallow.py::TestList::test_list_with_nested[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2choices_preserving_order[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2choices_preserving_order[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Integer-integer]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Number-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Float-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-String-string0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-String-string1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Boolean-boolean0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Boolean-boolean1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-UUID-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-DateTime-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Date-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Time-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Email-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Url-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Field-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[2.0-Raw-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Integer-integer]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Number-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Float-number]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-String-string0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-String-string1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Boolean-boolean0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Boolean-boolean1]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-UUID-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-DateTime-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Date-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Time-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Email-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Url-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Field-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_type[3.0.0-Raw-string]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_formatted_field_translates_to_array[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_formatted_field_translates_to_array[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Integer-int32]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Float-float]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-UUID-uuid]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-DateTime-date-time]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Date-date]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Email-email]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[2.0-Url-url]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Integer-int32]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Float-float]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-UUID-uuid]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-DateTime-date-time]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Date-date]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Email-email]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field2property_formats[3.0.0-Url-url]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_description[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_description[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_boolean_false_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_callable[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_missing_callable[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default_and_missing[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_doc_default_and_missing[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_missing_load[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_missing_load[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_location[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_location[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_multiple_json_locations[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields2parameters_does_not_modify_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields2parameters_does_not_modify_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_location_mapping[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_location_mapping[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_default_location_mapping_if_schema_many[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_dump_only[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_fields_with_dump_only[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_equal[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_equal[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_only_allows_valid_properties_in_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_only_allows_valid_properties_in_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices_multiple[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_choices_multiple[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_additional_metadata[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_additional_metadata[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_allow_none[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_allow_none[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_str_regex[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_str_regex[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_pattern_obj_regex[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_pattern_obj_regex[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_no_pattern[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_no_pattern[3.0.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_multiple_patterns[2.0]",
"tests/test_openapi.py::TestMarshmallowFieldToOpenAPI::test_field_with_multiple_patterns[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_invalid_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_invalid_schema[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_with_explicit_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_with_explicit_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_override_name_ma2[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema2jsonschema_override_name_ma2[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_required_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_required_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_partial[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_partial[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_no_required_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_no_required_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_title_and_description_may_be_added[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_title_and_description_may_be_added[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_excluded_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_excluded_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_only_explicitly_declared_fields_are_translated[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_only_explicitly_declared_fields_are_translated[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_observed_field_name_for_required_field[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_observed_field_name_for_required_field[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[2.0-False]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_schema_instance_inspection[3.0.0-False]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_raises_error_if_no_declared_fields[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToModelDefinition::test_raises_error_if_no_declared_fields[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_multiple[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_required[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_field_required[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_invalid_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_invalid_schema[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body_with_dump_only[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_body_many[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance_many_should_raise_exception[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_schema_query_instance_many_should_raise_exception[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_default_in_body[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_query[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_fields_query[3.0.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_raises_error_if_not_a_schema[2.0]",
"tests/test_openapi.py::TestMarshmallowSchemaToParameters::test_raises_error_if_not_a_schema[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec_metadatas[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec_metadatas[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_spec[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many_spec[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many_spec[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_ref[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_ref[3.0.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many[2.0]",
"tests/test_openapi.py::TestNesting::test_field2property_nested_many[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[2.0-only]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[2.0-exclude]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[3.0.0-only]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_only_exclude[3.0.0-exclude]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_with_adhoc_changes[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_fields_with_adhoc_changes[3.0.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_excluded_fields[2.0]",
"tests/test_openapi.py::TestNesting::test_schema2jsonschema_with_nested_excluded_fields[3.0.0]",
"tests/test_openapi.py::TestNesting::test_nested_field_with_property[2.0]",
"tests/test_openapi.py::TestNesting::test_nested_field_with_property[3.0.0]",
"tests/test_openapi.py::test_openapi_tools_validate_v2",
"tests/test_openapi.py::test_openapi_tools_validate_v3",
"tests/test_openapi.py::TestFieldValidation::test_range[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_range[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_ranges[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_ranges[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_list_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_list_length[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_string_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_string_length[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_lengths[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_multiple_lengths[3.0.0]",
"tests/test_openapi.py::TestFieldValidation::test_equal_length[2.0]",
"tests/test_openapi.py::TestFieldValidation::test_equal_length[3.0.0]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-07 15:32:59+00:00
|
mit
| 3,733 |
|
marshmallow-code__apispec-424
|
diff --git a/src/apispec/core.py b/src/apispec/core.py
index 33304a8..1b7a105 100644
--- a/src/apispec/core.py
+++ b/src/apispec/core.py
@@ -45,7 +45,7 @@ def clean_operations(operations, openapi_major_version):
Otherwise, it is assumed to be a reference name as string and the corresponding $ref
string is returned.
- :param str obj_type: 'parameter' or 'response'
+ :param str obj_type: "parameter" or "response"
:param dict|str obj: parameter or response in dict form or as ref_id string
:param int openapi_major_version: The major version of the OpenAPI standard
"""
@@ -67,10 +67,12 @@ def clean_operations(operations, openapi_major_version):
get_ref("parameter", p, openapi_major_version) for p in parameters
]
if "responses" in operation:
+ responses = OrderedDict()
for code, response in iteritems(operation["responses"]):
- operation["responses"][code] = get_ref(
+ responses[str(code)] = get_ref(
"response", response, openapi_major_version
)
+ operation["responses"] = responses
class Components(object):
|
marshmallow-code/apispec
|
d6e798e6a281ab7f77ce754bd9a32f763bd64a31
|
diff --git a/tests/test_ext_marshmallow.py b/tests/test_ext_marshmallow.py
index c1ffee1..e734e52 100644
--- a/tests/test_ext_marshmallow.py
+++ b/tests/test_ext_marshmallow.py
@@ -306,24 +306,24 @@ class TestOperationHelper:
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
if isinstance(pet_schema, Schema) and pet_schema.many is True:
- assert get["responses"][200]["schema"]["type"] == "array"
- schema_reference = get["responses"][200]["schema"]["items"]
+ assert get["responses"]["200"]["schema"]["type"] == "array"
+ schema_reference = get["responses"]["200"]["schema"]["items"]
assert (
- get["responses"][200]["headers"]["PetHeader"]["schema"]["type"]
+ get["responses"]["200"]["headers"]["PetHeader"]["schema"]["type"]
== "array"
)
- header_reference = get["responses"][200]["headers"]["PetHeader"]["schema"][
- "items"
- ]
+ header_reference = get["responses"]["200"]["headers"]["PetHeader"][
+ "schema"
+ ]["items"]
else:
- schema_reference = get["responses"][200]["schema"]
- header_reference = get["responses"][200]["headers"]["PetHeader"]["schema"]
+ schema_reference = get["responses"]["200"]["schema"]
+ header_reference = get["responses"]["200"]["headers"]["PetHeader"]["schema"]
assert schema_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert header_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert len(spec_fixture.spec.components._schemas) == 1
resolved_schema = spec_fixture.spec.components._schemas["Pet"]
assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
- assert get["responses"][200]["description"] == "successful operation"
+ assert get["responses"]["200"]["description"] == "successful operation"
@pytest.mark.parametrize(
"pet_schema",
@@ -348,31 +348,31 @@ class TestOperationHelper:
get = get_paths(spec_fixture.spec)["/pet"]["get"]
if isinstance(pet_schema, Schema) and pet_schema.many is True:
assert (
- get["responses"][200]["content"]["application/json"]["schema"]["type"]
+ get["responses"]["200"]["content"]["application/json"]["schema"]["type"]
== "array"
)
- schema_reference = get["responses"][200]["content"]["application/json"][
+ schema_reference = get["responses"]["200"]["content"]["application/json"][
"schema"
]["items"]
assert (
- get["responses"][200]["headers"]["PetHeader"]["schema"]["type"]
+ get["responses"]["200"]["headers"]["PetHeader"]["schema"]["type"]
== "array"
)
- header_reference = get["responses"][200]["headers"]["PetHeader"]["schema"][
- "items"
- ]
+ header_reference = get["responses"]["200"]["headers"]["PetHeader"][
+ "schema"
+ ]["items"]
else:
- schema_reference = get["responses"][200]["content"]["application/json"][
+ schema_reference = get["responses"]["200"]["content"]["application/json"][
"schema"
]
- header_reference = get["responses"][200]["headers"]["PetHeader"]["schema"]
+ header_reference = get["responses"]["200"]["headers"]["PetHeader"]["schema"]
assert schema_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert header_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert len(spec_fixture.spec.components._schemas) == 1
resolved_schema = spec_fixture.spec.components._schemas["Pet"]
assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
- assert get["responses"][200]["description"] == "successful operation"
+ assert get["responses"]["200"]["description"] == "successful operation"
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_expand_parameters_v2(self, spec_fixture):
@@ -447,7 +447,7 @@ class TestOperationHelper:
path="/pet", operations={"get": {"responses": {200: {"schema": PetSchema}}}}
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
- assert get["responses"][200]["schema"] == build_ref(
+ assert get["responses"]["200"]["schema"] == build_ref(
spec_fixture.spec, "schema", "Pet"
)
@@ -465,7 +465,7 @@ class TestOperationHelper:
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
- assert get["responses"][200]["content"]["application/json"][
+ assert get["responses"]["200"]["content"]["application/json"][
"schema"
] == build_ref(spec_fixture.spec, "schema", "Pet")
@@ -484,7 +484,7 @@ class TestOperationHelper:
path="/pet", operations={"get": {"responses": {200: {"schema": PetSchema}}}}
)
get = get_paths(spec)["/pet"]["get"]
- assert get["responses"][200]["schema"] == build_ref(spec, "schema", "Pet")
+ assert get["responses"]["200"]["schema"] == build_ref(spec, "schema", "Pet")
def test_schema_uses_ref_if_available_name_resolver_returns_none_v3(self):
def resolver(schema):
@@ -508,7 +508,7 @@ class TestOperationHelper:
},
)
get = get_paths(spec)["/pet"]["get"]
- assert get["responses"][200]["content"]["application/json"][
+ assert get["responses"]["200"]["content"]["application/json"][
"schema"
] == build_ref(spec, "schema", "Pet")
@@ -577,7 +577,7 @@ class TestOperationHelper:
"items": build_ref(spec_fixture.spec, "schema", "Pet"),
}
assert get["parameters"][0]["schema"] == resolved_schema
- assert get["responses"][200]["schema"] == resolved_schema
+ assert get["responses"]["200"]["schema"] == resolved_schema
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_array_uses_ref_if_available_v3(self, spec_fixture):
@@ -616,7 +616,9 @@ class TestOperationHelper:
}
request_schema = get["parameters"][0]["content"]["application/json"]["schema"]
assert request_schema == resolved_schema
- response_schema = get["responses"][200]["content"]["application/json"]["schema"]
+ response_schema = get["responses"]["200"]["content"]["application/json"][
+ "schema"
+ ]
assert response_schema == resolved_schema
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
@@ -641,7 +643,7 @@ class TestOperationHelper:
},
)
get = get_paths(spec_fixture.spec)["/parents"]["get"]
- assert get["responses"][200]["schema"] == {
+ assert get["responses"]["200"]["schema"] == {
"type": "object",
"properties": {
"mother": build_ref(spec_fixture.spec, "schema", "Pet"),
@@ -675,7 +677,7 @@ class TestOperationHelper:
},
)
get = get_paths(spec_fixture.spec)["/parents"]["get"]
- assert get["responses"][200]["content"]["application/json"]["schema"] == {
+ assert get["responses"]["200"]["content"]["application/json"]["schema"] == {
"type": "object",
"properties": {
"mother": build_ref(spec_fixture.spec, "schema", "Pet"),
|
Consider coercing response codes to strings
The OpenAPI specification [allows response codes of the form 2XX, 4XX, etc.](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#patterned-fields-1). Using this library, it's possible to write something like this in a docstring:
```yaml
responses:
  200:
    content:
      application/json:
        schema: GoodResponseSchema
  4XX:
    content:
      application/json:
        schema: RequestErrorSchema
```
This causes the following error:
```
  File "/Users/zach/.pyenv/versions/3.6.5/lib/python3.6/json/encoder.py", line 353, in _iterencode_dict
    items = sorted(dct.items(), key=lambda kv: kv[0])
TypeError: '<' not supported between instances of 'str' and 'int'
```
This happens because the `200` key is stored as an `int` while the `4XX` key is stored as a `str`. A preprocessing step that coerces all response codes to strings would fix the problem.
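A minimal sketch of such a preprocessing step, mirroring the shape of the clean_operations patch shown above (the standalone function name is illustrative):

```py
from collections import OrderedDict


def coerce_response_codes(operation):
    # Rebuild the responses mapping with every key cast to str, so that
    # integer codes like 200 and patterned codes like "4XX" can be
    # sorted and serialized together without a str-vs-int comparison error.
    if "responses" in operation:
        responses = OrderedDict()
        for code, response in operation["responses"].items():
            responses[str(code)] = response
        operation["responses"] = responses
    return operation
```

For example, a responses dict with keys `200` and `"4XX"` would come out with the uniform string keys `"200"` and `"4XX"`.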
|
0.0
|
d6e798e6a281ab7f77ce754bd9a32f763bd64a31
|
[
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-pet_schema1]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-pet_schema2]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v2[2.0-tests.schemas.PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-pet_schema1]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-pet_schema2]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_v3[3.0.0-tests.schemas.PetSchema]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_name_resolver_returns_none_v2",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_if_available_name_resolver_returns_none_v3",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_array_uses_ref_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_array_uses_ref_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_partially_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_partially_v3[3.0.0]"
] |
[
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_can_use_schema_as_definition[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_helper_without_schema[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_helper_without_schema[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference[AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_in_list[AnalysisWithListSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_in_list[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_return_none[AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_schema_dict_auto_reference_return_none[schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[2.0-AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[3.0.0-AnalysisSchema]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_warning_when_schema_added_twice[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_instances_with_different_modifiers_added[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_instances_with_different_modifiers_added[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_with_clashing_names[2.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_schema_with_clashing_names[3.0.0]",
"tests/test_ext_marshmallow.py::TestDefinitionHelper::test_resolve_nested_schema_many_true_resolver_return_none",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentParameterHelper::test_can_use_schema_in_parameter[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response_header[2.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response_header[2.0-schema1]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response_header[3.0.0-PetSchema]",
"tests/test_ext_marshmallow.py::TestComponentResponseHelper::test_can_use_schema_in_response_header[3.0.0-schema1]",
"tests/test_ext_marshmallow.py::TestCustomField::test_can_use_custom_field_decorator[2.0]",
"tests/test_ext_marshmallow.py::TestCustomField::test_can_use_custom_field_decorator[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_expand_parameters_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_expand_parameters_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_in_parameters_and_request_body_if_available_v2[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_uses_ref_in_parameters_and_request_body_if_available_v3[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_parameter_reference[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_parameter_reference[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_response_reference[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_response_reference[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2json[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2json[3.0.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2parameters[2.0]",
"tests/test_ext_marshmallow.py::TestOperationHelper::test_schema_global_state_untouched_2parameters[3.0.0]",
"tests/test_ext_marshmallow.py::TestCircularReference::test_circular_referencing_schemas[2.0]",
"tests/test_ext_marshmallow.py::TestCircularReference::test_circular_referencing_schemas[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_single[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_single[3.0.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_many[2.0]",
"tests/test_ext_marshmallow.py::TestSelfReference::test_self_referencing_field_many[3.0.0]",
"tests/test_ext_marshmallow.py::TestOrderedSchema::test_ordered_schema[2.0]",
"tests/test_ext_marshmallow.py::TestOrderedSchema::test_ordered_schema[3.0.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props[2.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props[3.0.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props_passed_as_snake_case[2.0]",
"tests/test_ext_marshmallow.py::TestFieldWithCustomProps::test_field_with_custom_props_passed_as_snake_case[3.0.0]",
"tests/test_ext_marshmallow.py::TestSchemaWithDefaultValues::test_schema_with_default_values[2.0]",
"tests/test_ext_marshmallow.py::TestSchemaWithDefaultValues::test_schema_with_default_values[3.0.0]",
"tests/test_ext_marshmallow.py::TestList::test_list_with_nested[2.0]",
"tests/test_ext_marshmallow.py::TestList::test_list_with_nested[3.0.0]"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-06 21:50:12+00:00
|
mit
| 3,734 |